author     Egor Chesakov <Egor.Chesakov@microsoft.com>  2017-12-15 20:30:58 -0800
committer  Jan Kotas <jkotas@microsoft.com>             2017-12-15 23:30:58 -0500
commit     ef74ed68fdb108ed9432c40eb2acae4bf7c35bd3 (patch)
tree       b9c498403c3296680a0df372b2a522ac839bc6da
parent     3c958f345832f649889973e84bdac2f215a0d0e3 (diff)
Clean up clrjit: use TARGET_POINTER_SIZE instead of sizeof(void*) where the target pointer size is meant (#15524)
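The substance of the change: sizeof(void*) is evaluated for the platform the JIT binary itself was built for (the host), while TARGET_POINTER_SIZE names the pointer size of the platform being compiled to (the target). The two agree for a native JIT but diverge when cross-jitting, which is exactly what the comment removed from inlinepolicy.cpp below warns about. A minimal sketch of the distinction, with an illustrative stand-in for the real target.h definition:

    #include <cstdio>

    // Illustrative stand-in for the per-target constant in src/jit/target.h.
    // Imagine a 64-bit-hosted altjit whose target is 32-bit ARM:
    #define TARGET_POINTER_SIZE 4

    int main()
    {
        // Host property: 8 when the JIT itself is a 64-bit binary.
        std::printf("host pointer size:   %zu\n", sizeof(void*));
        // Target property: what frame slots and GC slots must use.
        std::printf("target pointer size: %d\n", TARGET_POINTER_SIZE);
    }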
-rw-r--r--  src/jit/codegencommon.cpp   18
-rw-r--r--  src/jit/codegenlegacy.cpp   59
-rw-r--r--  src/jit/compiler.cpp         2
-rw-r--r--  src/jit/compiler.h           2
-rw-r--r--  src/jit/compiler.hpp         2
-rw-r--r--  src/jit/ee_il_dll.cpp        4
-rw-r--r--  src/jit/emit.cpp            32
-rw-r--r--  src/jit/emitarm.cpp          6
-rw-r--r--  src/jit/emitarm64.cpp        6
-rw-r--r--  src/jit/emitxarch.cpp       24
-rw-r--r--  src/jit/gcencode.cpp        16
-rw-r--r--  src/jit/gcinfo.cpp           2
-rw-r--r--  src/jit/importer.cpp        16
-rw-r--r--  src/jit/inlinepolicy.cpp     8
-rw-r--r--  src/jit/lclvars.cpp         44
-rw-r--r--  src/jit/morph.cpp            8
-rw-r--r--  src/jit/typelist.h           2
17 files changed, 126 insertions(+), 125 deletions(-)
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 7be0f4f8d8..bf0412dd66 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -3113,7 +3113,7 @@ void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode)
// and thus saved on the frame).
// Compute the maximum estimated spill temp size.
- unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + sizeof(void*);
+ unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + TARGET_POINTER_SIZE;
maxTmpSize += (compiler->tmpDoubleSpillMax * sizeof(double)) + (compiler->tmpIntSpillMax * sizeof(int));
@@ -5122,7 +5122,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
// idea of how to ignore it.
// On Arm, a long can be passed in register
- noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == sizeof(void*));
+ noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == TARGET_POINTER_SIZE);
#endif
#endif //_TARGET_64BIT_
@@ -5618,7 +5618,7 @@ void CodeGen::genCheckUseBlockInit()
initStkLclCnt += varDsc->lvStructGcCount;
}
- if ((compiler->lvaLclSize(varNum) > (3 * sizeof(void*))) && (largeGcStructs <= 4))
+ if ((compiler->lvaLclSize(varNum) > (3 * TARGET_POINTER_SIZE)) && (largeGcStructs <= 4))
{
largeGcStructs++;
}
@@ -9151,10 +9151,10 @@ void CodeGen::genFnProlog()
if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
{
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*));
+ unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE;
// Zero out the slot for nesting level 0
- unsigned firstSlotOffs = filterEndOffsetSlotOffs - (sizeof(void*));
+ unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE;
if (!initRegZeroed)
{
@@ -9779,7 +9779,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
- if ((compiler->compLclFrameSize == sizeof(void*)) && !compiler->compJmpOpUsed)
+ if ((compiler->compLclFrameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regTracker.rsTrackRegTrash(REG_ECX);
@@ -10008,8 +10008,8 @@ void CodeGen::genFnEpilog(BasicBlock* block)
if (fCalleePop)
{
- noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * sizeof(void*));
- stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+ noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
+ stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
@@ -11948,7 +11948,7 @@ void CodeGen::genSetScopeInfo(unsigned which,
noway_assert(cookieOffset < varOffset);
unsigned offset = varOffset - cookieOffset;
- unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+ unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(offset < stkArgSize);
offset = stkArgSize - offset;
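Note the two different replacements in this file. Slots that hold pointers on the target frame (spill temps, shadow-SP slots, GC structs) become TARGET_POINTER_SIZE, while the argument-area arithmetic driven by rsCalleeRegArgCount becomes REGSIZE_BYTES, the width of a general-purpose register. The two values coincide on the current targets, but they name different properties. A sketch under illustrative 64-bit values:

    // Illustrative stand-ins for the real per-target definitions.
    #define TARGET_POINTER_SIZE 8 // width of a pointer on the target
    #define REGSIZE_BYTES 8       // width of an integer register on the target

    // Bytes of arguments passed on the stack: each register-passed argument
    // consumes one integer register, so this is register-file arithmetic.
    unsigned stackArgSize(unsigned compArgSize, unsigned calleeRegArgCount)
    {
        return compArgSize - calleeRegArgCount * REGSIZE_BYTES;
    }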
diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
index d741a446e7..1b430031c5 100644
--- a/src/jit/codegenlegacy.cpp
+++ b/src/jit/codegenlegacy.cpp
@@ -5090,11 +5090,12 @@ void CodeGen::genCodeForTreeLeaf(GenTreePtr tree, regMaskTP destReg, regMaskTP b
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
- sizeof(void*)); // below doesn't underflow.
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+ TARGET_POINTER_SIZE); // below doesn't underflow.
+ filterEndOffsetSlotOffs =
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
- curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*));
+ curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
reg = REG_STK;
break;
@@ -13047,14 +13048,14 @@ void CodeGen::genCodeForBBlist()
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs =
- (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
- curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
+ curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
- curNestingSlotOffs - sizeof(void*));
+ curNestingSlotOffs - TARGET_POINTER_SIZE);
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs);
@@ -15977,7 +15978,8 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
addrReg = 0;
// Get the number of BYTES to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass),
+ TARGET_POINTER_SIZE);
size_t bytesToBeCopied = opsz;
// postponedFields is true if we have any postponed fields
@@ -16020,14 +16022,14 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
if (fieldVarDsc->lvStackAligned())
{
if (fieldVarDsc->lvExactSize != 2 * sizeof(unsigned) &&
- fieldVarDsc->lvFldOffset + sizeof(void*) != bytesToBeCopied)
+ fieldVarDsc->lvFldOffset + TARGET_POINTER_SIZE != bytesToBeCopied)
{
// Might need 4-bytes paddings for fields other than LONG and DOUBLE.
// Just push some junk (i.e EAX) on the stack.
inst_RV(INS_push, REG_EAX, TYP_INT);
genSinglePush();
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
// If we have an expectedAlignedOffset make sure that this push instruction
@@ -16060,11 +16062,11 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
}
else
{
- getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
// Push the "upper half" of DOUBLE var if it is not enregistered.
@@ -16073,11 +16075,11 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
{
if (!fieldVarDsc->lvRegister)
{
- getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
//
@@ -16156,7 +16158,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
else // not stack aligned
{
@@ -16171,11 +16173,12 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
// This should never change until it is set back to UINT_MAX by an aligned
// offset
noway_assert(expectedAlignedOffset ==
- roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*));
+ roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) -
+ TARGET_POINTER_SIZE);
}
expectedAlignedOffset =
- roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*);
+ roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) - TARGET_POINTER_SIZE;
noway_assert(expectedAlignedOffset < bytesToBeCopied);
@@ -16302,8 +16305,8 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
noway_assert(arg->gtObj.gtOp1->InReg());
regNumber reg = arg->gtObj.gtOp1->gtRegNum;
// Get the number of DWORDS to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
- unsigned slots = (unsigned)(opsz / sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(DWORD));
+ unsigned slots = (unsigned)(opsz / sizeof(DWORD));
BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
@@ -16355,7 +16358,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
if (opsz & 0x4)
{
- stkDisp -= sizeof(void*);
+ stkDisp -= TARGET_POINTER_SIZE;
getEmitter()->emitIns_AR_R(INS_push, EA_4BYTE, REG_NA, reg, stkDisp);
genSinglePush();
}
@@ -16367,7 +16370,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
{
getEmitter()->emitIns_R_AR(INS_movq, EA_8BYTE, xmmReg, reg, curDisp);
getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_SPBASE, curDisp);
- curDisp += 2 * sizeof(void*);
+ curDisp += 2 * TARGET_POINTER_SIZE;
}
noway_assert(curDisp == stkDisp);
}
@@ -16385,7 +16388,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
noway_assert(gcLayout[i] == TYPE_GC_BYREF);
fieldSize = EA_BYREF;
}
- getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * sizeof(void*));
+ getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * TARGET_POINTER_SIZE);
genSinglePush();
}
}
@@ -19290,7 +19293,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
/* Keep track of ESP for EBP-less frames */
genSinglePush();
- argSize += sizeof(void*);
+ argSize += REGSIZE_BYTES;
#elif defined(_TARGET_ARM_)
@@ -19683,16 +19686,16 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
// Push the count of the incoming stack arguments
unsigned nOldStkArgs =
- (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*));
+ (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES);
getEmitter()->emitIns_I(INS_push, EA_4BYTE, nOldStkArgs);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push the count of the outgoing stack arguments
- getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / sizeof(void*));
+ getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / REGSIZE_BYTES);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push info about the callee-saved registers to be restored
// For now, we always spill all registers if compiler->compTailCallUsed
@@ -19701,13 +19704,13 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
(fTailCallTargetIsVSD ? 0x2 : 0x0); // Stub dispatch flag
getEmitter()->emitIns_I(INS_push, EA_4BYTE, calleeSavedRegInfo);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push the address of the target function
getEmitter()->emitIns_R(INS_push, EA_4BYTE, REG_TAILCALL_ADDR);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
#else // _TARGET_X86_
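One hunk above is the odd one out: in genPushArgList the divisor became sizeof(DWORD) rather than TARGET_POINTER_SIZE, since that x86 legacy-backend code counts 4-byte push operations (the adjacent comment says "the number of DWORDS"), not target pointers. A sketch of that slot computation; roundUpPow2 is a local stand-in for the jit's roundUp:

    typedef unsigned int DWORD; // the Win32 4-byte unsigned type

    // Round size up to a multiple of m, where m is a power of two.
    static unsigned roundUpPow2(unsigned size, unsigned m)
    {
        return (size + m - 1) & ~(m - 1);
    }

    // Number of 4-byte pushes needed to copy a struct to the stack.
    unsigned dwordSlots(unsigned classSize)
    {
        return roundUpPow2(classSize, sizeof(DWORD)) / sizeof(DWORD);
    }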
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index d359c6f68c..c496de520c 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -2179,7 +2179,7 @@ unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
}
else if (cit == CORINFO_TYPE_REFANY)
{
- sigSize = 2 * sizeof(void*);
+ sigSize = 2 * TARGET_POINTER_SIZE;
}
return sigSize;
}
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index cf2f68f569..5e448d8f6a 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -743,7 +743,7 @@ public:
bool lvStackAligned() const
{
assert(lvIsStructField);
- return ((lvFldOffset % sizeof(void*)) == 0);
+ return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
bool lvNormalizeOnLoad() const
{
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 2340780531..096c81d10a 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -2131,7 +2131,7 @@ inline void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler* comp)
#ifdef _TARGET_ARM_
// Don't set a preferred register for a TYP_STRUCT that takes more than one register slot
- if ((lvType == TYP_STRUCT) && (lvSize() > sizeof(void*)))
+ if ((lvType == TYP_STRUCT) && (lvSize() > REGSIZE_BYTES))
return;
#endif
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index 87073760ee..e87c46c89a 100644
--- a/src/jit/ee_il_dll.cpp
+++ b/src/jit/ee_il_dll.cpp
@@ -441,7 +441,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
return structSize; // TODO: roundUp() needed here?
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- return sizeof(size_t);
+ return TARGET_POINTER_SIZE;
#else // !_TARGET_AMD64_
@@ -454,7 +454,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
unsigned structSize = info.compCompHnd->getClassSize(argClass);
// make certain the EE passes us back the right thing for refanys
- assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * sizeof(void*));
+ assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * TARGET_POINTER_SIZE);
// For each target that supports passing struct args in multiple registers
// apply the target specific rules for them here:
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index c72deee89c..9f42aea86c 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -2129,7 +2129,7 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
if (emitComp->verbose)
{
- unsigned count = (offsHi - offsLo) / sizeof(void*);
+ unsigned count = (offsHi - offsLo) / TARGET_POINTER_SIZE;
printf("%u tracked GC refs are at stack offsets ", count);
if (offsLo >= 0)
@@ -2164,13 +2164,13 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
#endif // DEBUG
- assert(((offsHi - offsLo) % sizeof(void*)) == 0);
- assert((offsLo % sizeof(void*)) == 0);
- assert((offsHi % sizeof(void*)) == 0);
+ assert(((offsHi - offsLo) % TARGET_POINTER_SIZE) == 0);
+ assert((offsLo % TARGET_POINTER_SIZE) == 0);
+ assert((offsHi % TARGET_POINTER_SIZE) == 0);
emitGCrFrameOffsMin = offsLo;
emitGCrFrameOffsMax = offsHi;
- emitGCrFrameOffsCnt = (offsHi - offsLo) / sizeof(void*);
+ emitGCrFrameOffsCnt = (offsHi - offsLo) / TARGET_POINTER_SIZE;
}
/*****************************************************************************
@@ -2847,7 +2847,7 @@ void emitter::emitDispVarSet()
int of;
bool sp = false;
- for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += sizeof(void*))
+ for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += TARGET_POINTER_SIZE)
{
if (emitGCrFrameLiveTab[vn])
{
@@ -4528,7 +4528,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
UNATIVE_OFFSET roDataAlignmentDelta = 0;
if (emitConsDsc.dsdOffs)
{
- UNATIVE_OFFSET roDataAlignment = sizeof(void*); // 8 Byte align by default.
+ UNATIVE_OFFSET roDataAlignment = TARGET_POINTER_SIZE; // 8 Byte align by default.
roDataAlignmentDelta = (UNATIVE_OFFSET)ALIGN_UP(emitTotalHotCodeSize, roDataAlignment) - emitTotalHotCodeSize;
assert((roDataAlignmentDelta == 0) || (roDataAlignmentDelta == 4));
}
@@ -4925,7 +4925,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
varPtrDsc** dp;
for (vn = 0, of = emitGCrFrameOffsMin, dp = emitGCrFrameLiveTab; vn < emitGCrFrameOffsCnt;
- vn++, of += sizeof(void*), dp++)
+ vn++, of += TARGET_POINTER_SIZE, dp++)
{
if (*dp)
{
@@ -5459,7 +5459,7 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst)
{
JITDUMP(" section %u, size %u, block absolute addr\n", secNum++, dscSize);
- assert(dscSize && dscSize % sizeof(BasicBlock*) == 0);
+ assert(dscSize && dscSize % TARGET_POINTER_SIZE == 0);
size_t numElems = dscSize / TARGET_POINTER_SIZE;
BYTE** bDst = (BYTE**)dst;
for (unsigned i = 0; i < numElems; i++)
@@ -5548,14 +5548,14 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp
varPtrDsc* desc;
- assert((abs(offs) % sizeof(ssize_t)) == 0);
+ assert((abs(offs) % TARGET_POINTER_SIZE) == 0);
assert(needsGC(gcType));
/* Compute the index into the GC frame table if the caller didn't do it */
if (disp == -1)
{
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
}
assert((size_t)disp < emitGCrFrameOffsCnt);
@@ -5645,7 +5645,7 @@ void emitter::emitGCvarDeadSet(int offs, BYTE* addr, ssize_t disp)
if (disp == -1)
{
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
}
assert((unsigned)disp < emitGCrFrameOffsCnt);
@@ -5898,7 +5898,7 @@ void emitter::emitRecordGCcall(BYTE* codePos, unsigned char callInstrSize)
if (needsGC(gcType))
{
- call->cdArgTable[gcArgs] = i * sizeof(void*);
+ call->cdArgTable[gcArgs] = i * TARGET_POINTER_SIZE;
if (gcType == GCT_BYREF)
{
@@ -6144,7 +6144,7 @@ unsigned char emitter::emitOutputSizeT(BYTE* dst, ssize_t val)
}
#endif // DEBUG
- return sizeof(size_t);
+ return TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
@@ -6528,7 +6528,7 @@ void emitter::emitGCvarLiveUpd(int offs, int varNum, GCtype gcType, BYTE* addr)
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently dead, mark it as live */
@@ -6559,7 +6559,7 @@ void emitter::emitGCvarDeadUpd(int offs, BYTE* addr)
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently live, mark it as dead */
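The emit.cpp hunks all follow one pattern: the emitter's GC lifetime table keeps one entry per pointer-sized frame slot, so converting a frame offset into a table index divides by the slot size. A sketch of that mapping, with an illustrative constant:

    #define TARGET_POINTER_SIZE 8 // illustrative; the real value is per-target

    // Map a pointer-aligned frame offset to its index in the per-slot
    // GC lifetime table; offsMin is the lowest tracked offset.
    unsigned gcSlotIndex(int offs, int offsMin)
    {
        return (unsigned)(offs - offsMin) / TARGET_POINTER_SIZE;
    }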
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index a35c72973c..f844888f0a 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -4502,8 +4502,8 @@ void emitter::emitIns_Call(EmitCallType callType,
}
#endif
- assert(argSize % (int)sizeof(void*) == 0);
- argCnt = argSize / (int)sizeof(void*);
+ assert(argSize % (int)REGSIZE_BYTES == 0);
+ argCnt = argSize / (int)REGSIZE_BYTES;
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -6387,7 +6387,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
if (emitInsWritesToLclVarStackLoc(id))
{
int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
- unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+ unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
regNumber regBase;
int adr = emitComp->lvaFrameAddress(varNum, true, &regBase, ofs);
if (id->idGCref() != GCT_NONE)
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index 9b45d3d589..abde69c46a 100644
--- a/src/jit/emitarm64.cpp
+++ b/src/jit/emitarm64.cpp
@@ -7271,7 +7271,7 @@ void emitter::emitIns_Call(EmitCallType callType,
#endif
assert(argSize % REGSIZE_BYTES == 0);
- argCnt = (int)(argSize / (int)sizeof(void*));
+ argCnt = (int)(argSize / (int)REGSIZE_BYTES);
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -9928,7 +9928,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
if (emitInsWritesToLclVarStackLoc(id) || emitInsWritesToLclVarStackLocPair(id))
{
int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
- unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+ unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
bool FPbased;
int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
if (id->idGCref() != GCT_NONE)
@@ -9954,7 +9954,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
}
if (emitInsWritesToLclVarStackLocPair(id))
{
- unsigned ofs2 = ofs + sizeof(size_t);
+ unsigned ofs2 = ofs + TARGET_POINTER_SIZE;
if (id->idGCrefReg2() != GCT_NONE)
{
emitGCvarLiveUpd(adr + ofs2, varNum, id->idGCrefReg2(), dst);
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index fe46c19886..d2d28a460b 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -3679,7 +3679,7 @@ void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fld
if (EA_IS_OFFSET(attr))
{
assert(ins == INS_push);
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
id = emitNewInstrDsp(EA_1BYTE, offs);
id->idIns(ins);
@@ -3888,7 +3888,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO
assert(ins == INS_mov && reg == REG_EAX);
// Special case: "mov eax, [addr]" is smaller
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
}
else
{
@@ -3905,7 +3905,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO
// instruction.
if (ins == INS_mov && reg == REG_EAX)
{
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
if (size == EA_2BYTE)
sz += 1;
}
@@ -3979,7 +3979,7 @@ void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f
// the instruction.
if (ins == INS_mov && reg == REG_EAX)
{
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
if (size == EA_2BYTE)
sz += 1;
}
@@ -5291,7 +5291,7 @@ void emitter::emitIns_Call(EmitCallType callType,
//
//
//
- if ((sizeof(void*) + // return address for call
+ if ((TARGET_POINTER_SIZE + // return address for call
emitComp->genStackLevel +
// Current stack level. This gets resetted on every
// localloc and on the prolog (invariant is that
@@ -5301,7 +5301,7 @@ void emitter::emitIns_Call(EmitCallType callType,
// we've consumed more than JIT_RESERVED_STACK bytes
// of stack, which is what the prolog probe covers (in
// addition to the EE requested size)
- (emitComp->compHndBBtabCount * sizeof(void*))
+ (emitComp->compHndBBtabCount * TARGET_POINTER_SIZE)
// Hidden slots for calling finallys
) >= JIT_RESERVED_STACK)
{
@@ -5400,8 +5400,8 @@ void emitter::emitIns_Call(EmitCallType callType,
}
#endif
- assert(argSize % sizeof(void*) == 0);
- argCnt = (int)(argSize / (ssize_t)sizeof(void*)); // we need a signed-divide
+ assert(argSize % REGSIZE_BYTES == 0);
+ argCnt = (int)(argSize / (int)REGSIZE_BYTES); // we need a signed-divide
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -6395,7 +6395,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
if (jdsc && !noDetail)
{
- unsigned cnt = (jdsc->dsSize - 1) / sizeof(void*);
+ unsigned cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE;
BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
#ifdef _TARGET_AMD64_
@@ -8759,7 +8759,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
if (id->idIsDspReloc())
{
- emitRecordRelocation((void*)(dst - sizeof(void*)), target, IMAGE_REL_BASED_MOFFSET);
+ emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET);
}
#endif //_TARGET_X86_
@@ -11225,7 +11225,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP)
{
assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
- emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
}
break;
@@ -11235,7 +11235,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0,
- (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
}
break;
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index bdea7f173b..ea95e5cf13 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -1634,7 +1634,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
assert((compiler->compArgSize & 0x3) == 0);
size_t argCount =
- (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*);
+ (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
assert(argCount <= MAX_USHORT_SIZE_T);
header->argCount = static_cast<unsigned short>(argCount);
@@ -2085,7 +2085,7 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
{
unsigned offset;
- offset = (pasDepth - i) * sizeof(void*);
+ offset = (pasDepth - i) * TARGET_POINTER_SIZE;
if (curArg == GCT_BYREF)
offset |= byref_OFFSET_FLAG;
@@ -2110,7 +2110,7 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
lvl += i;
unsigned offset;
- offset = lvl * sizeof(void*);
+ offset = lvl * TARGET_POINTER_SIZE;
if (mask & pasByrefBottomMask)
offset |= byref_OFFSET_FLAG;
@@ -2329,7 +2329,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
// A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
@@ -2344,7 +2344,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
{
assert(pass == 1);
- unsigned offset = varDsc->lvStkOffs + i * sizeof(void*);
+ unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
@@ -2489,7 +2489,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
unsigned begOffs;
unsigned endOffs;
- assert(~OFFSET_MASK % sizeof(void*) == 0);
+ assert(~OFFSET_MASK % TARGET_POINTER_SIZE == 0);
/* Get hold of the variable's stack offset */
@@ -4276,7 +4276,7 @@ void GCInfo::gcMakeRegPtrTable(
// Note that the enregisterable struct types cannot have GC pointers in them.
if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
@@ -4287,7 +4287,7 @@ void GCInfo::gcMakeRegPtrTable(
continue;
}
- int offset = varDsc->lvStkOffs + i * sizeof(void*);
+ int offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
diff --git a/src/jit/gcinfo.cpp b/src/jit/gcinfo.cpp
index 293abd5c3e..f330a86042 100644
--- a/src/jit/gcinfo.cpp
+++ b/src/jit/gcinfo.cpp
@@ -515,7 +515,7 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* untrackedCount, UNALIGNED
}
else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
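The gcencode.cpp and gcinfo.cpp hunks above share a second pattern: a struct local is walked one pointer-sized slot at a time, lvaLclSize(varNum) / TARGET_POINTER_SIZE slots in total, against a parallel byte array from lvaGetGcLayout holding one GC type per slot. A sketch of that walk, with illustrative names and values:

    #define TARGET_POINTER_SIZE 8 // illustrative per-target constant

    enum GcSlotType : unsigned char { TYPE_GC_NONE, TYPE_GC_REF, TYPE_GC_BYREF };

    // Visit each pointer-sized slot of a struct local and compute the frame
    // offset of the slots that hold GC references.
    void walkGcLayout(unsigned lclSize, const unsigned char* gcPtrs, int stkOffs)
    {
        unsigned slots = lclSize / TARGET_POINTER_SIZE;
        for (unsigned i = 0; i < slots; i++)
        {
            if (gcPtrs[i] == TYPE_GC_NONE)
                continue;                                   // non-GC slot
            int offset = stkOffs + (int)(i * TARGET_POINTER_SIZE);
            (void)offset; // a real walker records this in the GC tables
        }
    }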
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 07531529dc..23262f372d 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -5672,7 +5672,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
- op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+ op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
if (varTypeIsStruct(exprToBox))
@@ -6528,9 +6528,9 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolve
op1->gtType = TYP_REF; // points at boxed object
FieldSeqNode* firstElemFldSeq =
GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
- op1 =
- gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ new (this, GT_CNS_INT)
+ GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
if (varTypeIsStruct(lclTyp))
{
@@ -6586,7 +6586,7 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolve
FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
}
if (!(access & CORINFO_ACCESS_ADDRESS))
@@ -11291,7 +11291,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// remember the element size
if (lclTyp == TYP_REF)
{
- op1->gtIndex.gtIndElemSize = sizeof(void*);
+ op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
}
else
{
@@ -14686,7 +14686,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
- // push(clone + sizeof(void*))
+ // push(clone + TARGET_POINTER_SIZE)
//
GenTreePtr cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
@@ -14719,7 +14719,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// to the beginning of the value-type. Today this means adjusting
// past the base of the objects vtable field which is pointer sized.
- op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+ op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
}
else
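Most of the importer hunks encode the same layout fact: an object reference points at the method-table pointer, and the value payload of a boxed value begins one target pointer past it, hence the constant TARGET_POINTER_SIZE adjustment in both the BOX and UNBOX paths. A rough layout sketch; the field names are illustrative, not the runtime's:

    #include <cstdint>

    // Conceptual layout of a boxed Int32 on the GC heap.
    struct BoxedInt32
    {
        void*        methodTable; // the object reference points here
        std::int32_t payload;     // unbox result: ref + TARGET_POINTER_SIZE
    };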
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index eaf739612c..2d4ebfd327 100644
--- a/src/jit/inlinepolicy.cpp
+++ b/src/jit/inlinepolicy.cpp
@@ -758,10 +758,8 @@ int DefaultPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* meth
callsiteSize += 10; // "lea EAX, bword ptr [EBP-14H]"
- // NB sizeof (void*) fails to convey intent when cross-jitting.
-
- unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
- unsigned slots = opsz / sizeof(void*);
+ unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), TARGET_POINTER_SIZE));
+ unsigned slots = opsz / TARGET_POINTER_SIZE;
callsiteSize += slots * 20; // "push gword ptr [EAX+offs] "
}
@@ -1578,7 +1576,7 @@ void DiscretionaryPolicy::MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo
const unsigned argCount = args.numArgs;
m_ArgCount = argCount;
- const unsigned pointerSize = sizeof(void*);
+ const unsigned pointerSize = TARGET_POINTER_SIZE;
unsigned i = 0;
// Implicit arguments
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index d75e81b815..bc2470c71f 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -364,7 +364,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
#endif // FEATURE_FASTTAILCALL
// The total argument size must be aligned.
- noway_assert((compArgSize % sizeof(void*)) == 0);
+ noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0);
#ifdef _TARGET_X86_
/* We can not pass more than 2^16 dwords as arguments as the "ret"
@@ -2263,9 +2263,9 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd);
size_t lvSize = varDsc->lvSize();
- assert((lvSize % sizeof(void*)) ==
- 0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
- varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
+ assert((lvSize % TARGET_POINTER_SIZE) ==
+ 0); // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid.
+ varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / TARGET_POINTER_SIZE) * sizeof(BYTE), CMK_LvaTable);
unsigned numGCVars;
var_types simdBaseType = TYP_UNKNOWN;
varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
@@ -4756,7 +4756,7 @@ void Compiler::lvaFixVirtualFrameOffsets()
// We need to re-adjust the offsets of the parameters so they are EBP
// relative rather than stack/frame pointer relative
- varDsc->lvStkOffs += (2 * sizeof(void*)); // return address and pushed EBP
+ varDsc->lvStkOffs += (2 * TARGET_POINTER_SIZE); // return address and pushed EBP
noway_assert(varDsc->lvStkOffs >= FIRST_ARG_STACK_OFFS);
}
@@ -4871,10 +4871,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
/* Update the argOffs to reflect arguments that are passed in registers */
noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG);
- noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*));
+ noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
#ifdef _TARGET_X86_
- argOffs -= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*);
+ argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
#endif
#ifndef LEGACY_BACKEND
@@ -4913,13 +4913,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
@@ -5018,13 +5018,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
@@ -5201,19 +5201,19 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
#if !defined(_TARGET_ARMARCH_)
#if DEBUG
- // TODO: Remove this noway_assert and replace occurrences of sizeof(void *) with argSize
+ // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize
// Also investigate why we are incrementing argOffs for X86 as this seems incorrect
//
- noway_assert(argSize == sizeof(void*));
+ noway_assert(argSize == TARGET_POINTER_SIZE);
#endif // DEBUG
#endif
#if defined(_TARGET_X86_)
- argOffs += sizeof(void*);
+ argOffs += TARGET_POINTER_SIZE;
#elif defined(_TARGET_AMD64_)
// Register arguments on AMD64 also takes stack space. (in the backing store)
varDsc->lvStkOffs = argOffs;
- argOffs += sizeof(void*);
+ argOffs += TARGET_POINTER_SIZE;
#elif defined(_TARGET_ARM64_)
// Register arguments on ARM64 only take stack space when they have a frame home.
#elif defined(_TARGET_ARM_)
@@ -5471,7 +5471,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#ifdef _TARGET_XARCH_
// On x86/amd64, the return address has already been pushed by the call instruction in the caller.
- stkOffs -= sizeof(void*); // return address;
+ stkOffs -= TARGET_POINTER_SIZE; // return address;
// TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other
// calleeregs. When you fix this, you'll also need to fix
@@ -6094,9 +6094,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (lvaOutgoingArgSpaceSize > 0)
{
#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V.
- noway_assert(lvaOutgoingArgSpaceSize >= (4 * sizeof(void*)));
+ noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE));
#endif
- noway_assert((lvaOutgoingArgSpaceSize % sizeof(void*)) == 0);
+ noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0);
// Give it a value so we can avoid asserts in CHK builds.
// Since this will always use an SP relative offset of zero
@@ -6125,7 +6125,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
pushedCount += 1; // pushed PC (return address)
#endif
- noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)sizeof(void*))));
+ noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE)));
}
int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs)
@@ -6328,11 +6328,11 @@ void Compiler::lvaAlignFrame()
//
bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0;
bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) %
- (sizeof(double) / sizeof(void*))) == 0;
+ (sizeof(double) / TARGET_POINTER_SIZE)) == 0;
if (regPushedCountAligned != lclFrameSizeAligned)
{
- lvaIncrementFrameSize(sizeof(void*));
+ lvaIncrementFrameSize(TARGET_POINTER_SIZE);
}
#elif defined(_TARGET_X86_)
@@ -6345,7 +6345,7 @@ void Compiler::lvaAlignFrame()
if (compLclFrameSize == 0)
{
// This can only happen with JitStress=1 or JitDoubleAlign=2
- lvaIncrementFrameSize(sizeof(void*));
+ lvaIncrementFrameSize(TARGET_POINTER_SIZE);
}
}
#endif
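The lvaAlignFrame hunk keeps the ARM frame 8-byte aligned: when the parity of the pushed-register area (measured in double-sized units) disagrees with the parity of the local frame size, one pointer-sized pad slot is added. A sketch of that parity check under illustrative 32-bit ARM values:

    #define TARGET_POINTER_SIZE 4 // illustrative: 32-bit ARM target

    // Grow the frame by one pointer slot when the pushed registers and the
    // locals disagree about 8-byte alignment.
    unsigned alignFrame(unsigned lclFrameSize, unsigned pushedRegCount)
    {
        bool frameAligned  = (lclFrameSize % sizeof(double)) == 0;
        bool pushesAligned =
            (pushedRegCount % (sizeof(double) / TARGET_POINTER_SIZE)) == 0;
        if (pushesAligned != frameAligned)
            lclFrameSize += TARGET_POINTER_SIZE; // one pad slot
        return lclFrameSize;
    }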
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index c1a79738e8..fbe3758054 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -146,7 +146,7 @@ bool Compiler::fgMorphRelopToQmark(GenTreePtr tree)
GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_CAST);
- noway_assert(genTypeSize(TYP_I_IMPL) == sizeof(void*));
+ noway_assert(genTypeSize(TYP_I_IMPL) == TARGET_POINTER_SIZE);
/* The first sub-operand is the thing being cast */
@@ -204,7 +204,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
tree->gtFlags &= ~GTF_UNSIGNED;
}
#else
- if (dstSize < sizeof(void*))
+ if (dstSize < TARGET_POINTER_SIZE)
{
oper = gtNewCastNodeL(TYP_I_IMPL, oper, TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
@@ -3957,7 +3957,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
else
{
- // If the valuetype size is not a multiple of sizeof(void*),
+ // If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -6371,7 +6371,7 @@ GenTreePtr Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varTyp
// Create a node representing the local pointing to the base of the args
GenTreePtr ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
- gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*) +
+ gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES +
lclOffs));
// Access the argument through the local
diff --git a/src/jit/typelist.h b/src/jit/typelist.h
index 2e3667b40d..f7b803fec1 100644
--- a/src/jit/typelist.h
+++ b/src/jit/typelist.h
@@ -5,7 +5,7 @@
#define GCS EA_GCREF
#define BRS EA_BYREF
#define PS EA_PTRSIZE
-#define PST (sizeof(void*) / sizeof(int))
+#define PST (TARGET_POINTER_SIZE / sizeof(int))
#ifdef _TARGET_64BIT_
#define VTF_I32 0
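The typelist.h change closes the loop: PST expresses the pointer size in int-sized (4-byte) units, 1 on 32-bit targets and 2 on 64-bit ones, so table entries measured in such slots track the target rather than the host. A quick check of the arithmetic under illustrative values:

    #include <cassert>

    #define TARGET_POINTER_SIZE 8 // illustrative 64-bit target
    #define PST (TARGET_POINTER_SIZE / sizeof(int))

    int main()
    {
        // Assuming a 4-byte int, a target pointer spans two int slots here;
        // with TARGET_POINTER_SIZE == 4 it would span one.
        assert(PST == 2);
    }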