// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Lowering for ARM                                XX
XX                                                                           XX
XX  This encapsulates all the logic for lowering trees for the ARM           XX
XX  architecture.  For a more detailed view of what lowering is, please      XX
XX  take a look at Lower.cpp.                                                XX
XX                                                                           XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator
#ifdef _TARGET_ARM_
#include "jit.h"
#include "sideeffects.h"
#include "lower.h"
#include "lsra.h"
//------------------------------------------------------------------------
// LowerStoreLoc: Lower a store of a lclVar
//
// Arguments:
//    storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Notes:
//    This involves:
//    - Widening small stores of constants: the store is widened to a 4-byte
//      TYP_INT store, and the constant is sign-extended when the local is a
//      signed small type.
//
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
    // Try to widen the ops if they are going into a local var.
    GenTree* op1 = storeLoc->gtGetOp1();
    if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
    {
        GenTreeIntCon* con    = op1->AsIntCon();
        ssize_t        ival   = con->gtIconVal;
        unsigned       varNum = storeLoc->gtLclNum;
        LclVarDsc*     varDsc = comp->lvaTable + varNum;

        if (varDsc->lvIsSIMDType())
        {
            noway_assert(storeLoc->gtType != TYP_STRUCT);
        }
        unsigned size = genTypeSize(storeLoc);
        // If we are storing a constant into a local variable
        // we extend the size of the store here
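        // (for example, a 1-byte store of the constant 0x80 to a signed byte
        // local becomes a 4-byte TYP_INT store of 0xffffff80)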
        if ((size < 4) && !varTypeIsStruct(varDsc))
        {
            if (!varTypeIsUnsigned(varDsc))
            {
                if (genTypeSize(storeLoc) == 1)
                {
                    if ((ival & 0x7f) != ival)
                    {
                        ival = ival | 0xffffff00;
                    }
                }
                else
                {
                    assert(genTypeSize(storeLoc) == 2);
                    if ((ival & 0x7fff) != ival)
                    {
                        ival = ival | 0xffff0000;
                    }
                }
            }

            // A local stack slot is at least 4 bytes in size, regardless of
            // what the local var is typed as, so auto-promote it here
            // unless it is a field of a promoted struct
            // TODO-ARM-CQ: if the field is promoted shouldn't we also be able to do this?
            if (!varDsc->lvIsStructField)
            {
                storeLoc->gtType = TYP_INT;
                con->SetIconValue(ival);
            }
        }
    }
}
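
//------------------------------------------------------------------------
// LowerBlockStore: Lower a block store node (e.g. GT_STORE_BLK or GT_STORE_OBJ)
//
// Arguments:
//    blkNode - the block store node to lower
//
// Notes:
//    On ARM both init blocks and copy blocks are currently lowered to helper
//    calls; the unrolled code sequences used on xarch are not implemented yet
//    (see the TODO-ARM-CQ comments in the body).
//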
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
    GenTree*  dstAddr  = blkNode->Addr();
    unsigned  size     = blkNode->gtBlkSize;
    GenTree*  source   = blkNode->Data();
    Compiler* compiler = comp;

    // Sources are dest address and initVal or source.
    GenTreePtr srcAddrOrFill = nullptr;
    bool       isInitBlk     = blkNode->OperIsInitBlkOp();

    if (!isInitBlk)
    {
        // CopyObj or CopyBlk
        if ((blkNode->OperGet() == GT_STORE_OBJ) &&
            ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
        {
            blkNode->SetOper(GT_STORE_BLK);
        }
        if (source->gtOper == GT_IND)
        {
            srcAddrOrFill = blkNode->Data()->gtGetOp1();
        }
    }

    if (isInitBlk)
    {
        GenTreePtr initVal = source;
        if (initVal->OperIsInitVal())
        {
            initVal = initVal->gtGetOp1();
        }
        srcAddrOrFill = initVal;

#if 0
        if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
        {
            // TODO-ARM-CQ: Currently we generate a helper call for every
            // initblk we encounter.  Later on we should implement loop unrolling
            // code sequences to improve CQ.
            // For reference see the code in LowerXArch.cpp.
            NYI_ARM("initblk loop unrolling is currently not implemented.");
        }
        else
#endif // 0
        {
            blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
        }
    }
    else
    {
        if (blkNode->OperGet() == GT_STORE_OBJ)
        {
            // CopyObj
            NYI_ARM("Lowering for GT_STORE_OBJ isn't implemented");
        }
        else
        {
            // CopyBlk
            short     internalIntCount      = 0;
            regMaskTP internalIntCandidates = RBM_NONE;

#if 0
            // In case of a CpBlk with a constant size that is less than or equal
            // to CPBLK_UNROLL_LIMIT, we should unroll the copy loop to improve CQ.
            // For reference see the code in lowerxarch.cpp.
            // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
            if ((size != 0) && (size <= CPBLK_UNROLL_LIMIT))
            {
                blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
            }
            else
#endif // 0
            {
                // If we get here, either the size is not known at JIT time or it
                // exceeds CPBLK_UNROLL_LIMIT, so call the CopyBlk helper. There
                // should never be any GC pointers in the source struct on this
                // path; those are handled by the CopyObj case above.
                blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
            }
        }
    }
}

//------------------------------------------------------------------------
// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
//    tree - GT_CAST node to be lowered
//
// Return Value:
//    None.
//
// Notes:
//    Casts from small int type to float/double are transformed as follows:
//    GT_CAST(byte, float/double)     =   GT_CAST(GT_CAST(byte, int32), float/double)
//    GT_CAST(sbyte, float/double)    =   GT_CAST(GT_CAST(sbyte, int32), float/double)
//    GT_CAST(int16, float/double)    =   GT_CAST(GT_CAST(int16, int32), float/double)
//    GT_CAST(uint16, float/double)   =   GT_CAST(GT_CAST(uint16, int32), float/double)
//
//    Similarly casts from float/double to a smaller int type are transformed as follows:
//    GT_CAST(float/double, byte)     =   GT_CAST(GT_CAST(float/double, int32), byte)
//    GT_CAST(float/double, sbyte)    =   GT_CAST(GT_CAST(float/double, int32), sbyte)
//    GT_CAST(float/double, int16)    =   GT_CAST(GT_CAST(float/double, int32), int16)
//    GT_CAST(float/double, uint16)   =   GT_CAST(GT_CAST(float/double, int32), uint16)
//
//    Note that for the overflow conversions we still depend on helper calls and
//    don't expect to see them here.
//    i) GT_CAST(float/double, int type with overflow detection)
//
void Lowering::LowerCast(GenTree* tree)
{
    assert(tree->OperGet() == GT_CAST);

    JITDUMP("LowerCast for: ");
    DISPNODE(tree);
    JITDUMP("\n");

    GenTreePtr op1     = tree->gtOp.gtOp1;
    var_types  dstType = tree->CastToType();
    var_types  srcType = op1->TypeGet();
    var_types  tmpType = TYP_UNDEF;

    if (varTypeIsFloating(srcType))
    {
        noway_assert(!tree->gtOverflow());
    }

    // Case of src is a small type and dst is a floating point type.
    if (varTypeIsSmall(srcType) && varTypeIsFloating(dstType))
    {
        NYI_ARM("Lowering for cast from small type to float"); // Not tested yet.
        // These conversions can never be overflow detecting ones.
        noway_assert(!tree->gtOverflow());
        tmpType = TYP_INT;
    }
    // case of src is a floating point type and dst is a small type.
    else if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
    {
        NYI_ARM("Lowering for cast from float to small type"); // Not tested yet.
        tmpType = TYP_INT;
    }

    if (tmpType != TYP_UNDEF)
    {
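        // Insert the intermediate cast to TYP_INT as the source of the
        // original cast; it inherits the unsigned/overflow/exception flags,
        // and the original cast no longer treats its (now int) source as
        // unsigned.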
        GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
        tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));

        tree->gtFlags &= ~GTF_UNSIGNED;
        tree->gtOp.gtOp1 = tmp;
        BlockRange().InsertAfter(op1, tmp);
    }
}

//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
//    tree - the node to lower
//
// Return Value:
//    None.
//
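// Notes:
//    ARM has no ROL instruction, so a left rotate is rewritten as a right
//    rotate: for a constant rotate amount, ROL(x, c) becomes
//    ROR(x, bitsize - c) (e.g., a 32-bit ROL by 5 becomes ROR by 27); for a
//    variable amount n it becomes ROR(x, -n), since rotating left by n is
//    the same as rotating right by (bitsize - n) and the rotate amount is
//    taken modulo the bit size.
//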
void Lowering::LowerRotate(GenTreePtr tree)
{
    if (tree->OperGet() == GT_ROL)
    {
        // There is no ROL instruction on ARM. Convert ROL into ROR.
        GenTreePtr rotatedValue        = tree->gtOp.gtOp1;
        unsigned   rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
        GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;

        if (rotateLeftIndexNode->IsCnsIntOrI())
        {
            ssize_t rotateLeftIndex                 = rotateLeftIndexNode->gtIntCon.gtIconVal;
            ssize_t rotateRightIndex                = rotatedValueBitSize - rotateLeftIndex;
            rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
        }
        else
        {
            GenTreePtr tmp =
                comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
            BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
            tree->gtOp.gtOp2 = tmp;
        }
        tree->ChangeOper(GT_ROR);
    }
}

//------------------------------------------------------------------------
// LowerPutArgStk: Lower a GT_PUTARG_STK node
//
// Arguments:
//    argNode - a GT_PUTARG_STK node
//
// Return Value:
//    None.
//
// Notes:
//    There is currently no Lowering required for this on ARM.
//
void Lowering::LowerPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
{
}

//------------------------------------------------------------------------
// IsCallTargetInRange: Can a call target address be encoded in-place?
//
// Return Value:
//    True if the addr fits into the range.
//
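// Notes:
//    The ARM BL instruction encodes its target as a limited pc-relative
//    displacement, so only sufficiently close targets can be called directly;
//    validImmForBL performs the actual check.
//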
bool Lowering::IsCallTargetInRange(void* addr)
{
    return comp->codeGen->validImmForBL((ssize_t)addr);
}

//------------------------------------------------------------------------
// IsContainableImmed: Is an immediate encodable in-place?
//
// Return Value:
//    True if the immediate can be folded into the parent instruction; for
//    example, it is small enough to encode directly and is not subject to
//    relocation.
//
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
{
    if (varTypeIsFloating(parentNode->TypeGet()))
    {
        switch (parentNode->OperGet())
        {
            default:
                return false;

            case GT_EQ:
            case GT_NE:
            case GT_LT:
            case GT_LE:
            case GT_GE:
            case GT_GT:
                if (childNode->IsIntegralConst(0))
                {
                    // TODO-ARM-Cleanup: not tested yet.
                    NYI_ARM("ARM IsContainableImmed for floating point type");

                    // We can contain a floating point 0.0 constant in a compare instruction
                    return true;
                }
                break;
        }
    }
    else
    {
        // Make sure we have an actual immediate
        if (!childNode->IsCnsIntOrI())
            return false;
        if (childNode->IsIconHandle() && comp->opts.compReloc)
            return false;

        ssize_t  immVal = childNode->gtIntCon.gtIconVal;
        emitAttr attr   = emitActualTypeSize(childNode->TypeGet());
        emitAttr size   = EA_SIZE(attr);
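
        // Only a limited set of immediate values can be encoded directly in
        // an ARM instruction, and the set differs between add/sub and the
        // logical ALU operations, so defer to the emitter's per-instruction
        // encoding checks below.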
        switch (parentNode->OperGet())
        {
            default:
                return false;

            case GT_ADD:
            case GT_SUB:
                if (emitter::emitIns_valid_imm_for_add(immVal, INS_FLAGS_DONT_CARE))
                    return true;
                break;

            case GT_EQ:
            case GT_NE:
            case GT_LT:
            case GT_LE:
            case GT_GE:
            case GT_GT:
            case GT_AND:
            case GT_OR:
            case GT_XOR:
                if (emitter::emitIns_valid_imm_for_alu(immVal))
                    return true;
                break;
        }
    }

    return false;
}

#endif // _TARGET_ARM_
#endif // !LEGACY_BACKEND