From 4585d5ab0cb0cda434be6cfcb89bd1252d2703b3 Mon Sep 17 00:00:00 2001
From: Egor Chesakov
Date: Tue, 27 Mar 2018 14:12:56 -0700
Subject: Add crossbitness support to ClrJit:

* Add FEATURE_CROSSBITNESS in crosscomponents.cmake
* Exclude mscordaccore, mscordbi, sos from CLR_CROSS_COMPONENTS_LIST when FEATURE_CROSSBITNESS is defined in crosscomponents.cmake
* Introduce target_size_t in src/jit/target.h
* Use size_t value in genMov32RelocatableImmediate in src/jit/codegen.h and src/jit/codegencommon.cpp
* Fix definition/declaration inconsistency for emitter::emitIns_R_I in emitarm.cpp
* Zero HiVal when GenTree::SetOper converts GenTreeLngCon->GenTreeIntCon in src/jit/compiler.hpp
* Explicitly specify roundUp(expr, TARGET_POINTER_SIZE)
* Use target_size_t* target in emitOutputDataSec in src/jit/emit.cpp
---
 crosscomponents.cmake     | 6 +++++-
 src/jit/codegen.h         | 2 +-
 src/jit/codegencommon.cpp | 7 ++++---
 src/jit/compiler.hpp      | 9 +++++++++
 src/jit/emit.cpp          | 6 +++---
 src/jit/emitarm.cpp       | 2 +-
 src/jit/lclvars.cpp       | 5 +++--
 src/jit/target.h          | 8 ++++++++
 8 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/crosscomponents.cmake b/crosscomponents.cmake
index be50ffa9d9..cedb1e2013 100644
--- a/crosscomponents.cmake
+++ b/crosscomponents.cmake
@@ -1,12 +1,16 @@
 add_definitions(-DCROSS_COMPILE)
 
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM)
+    set(FEATURE_CROSSBITNESS 1)
+endif(CLR_CMAKE_PLATFORM_ARCH_AMD64 AND CLR_CMAKE_TARGET_ARCH_ARM)
+
 set (CLR_CROSS_COMPONENTS_LIST
     crossgen
     clrjit
     legacyjit
 )
 
-if(NOT CLR_CMAKE_PLATFORM_LINUX)
+if(NOT CLR_CMAKE_PLATFORM_LINUX AND NOT FEATURE_CROSSBITNESS)
     list (APPEND CLR_CROSS_COMPONENTS_LIST
         mscordaccore
         mscordbi
diff --git a/src/jit/codegen.h b/src/jit/codegen.h
index 5a086e99fe..f38b4dc13a 100644
--- a/src/jit/codegen.h
+++ b/src/jit/codegen.h
@@ -361,7 +361,7 @@ protected:
     void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
     void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
-    void genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg);
+    void genMov32RelocatableImmediate(emitAttr size, size_t value, regNumber reg);
 
     bool genUsedPopToReturn; // True if we use the pop into PC to return,
                              // False if we didn't and must branch to LR to return.
 
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 4a479af777..72a91d9a0e 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -5685,7 +5685,8 @@ void CodeGen::genCheckUseBlockInit()
                 {
                     // Var is completely on the stack, in the legacy JIT case, or
                     // on the stack at entry, in the RyuJIT case.
-                    initStkLclCnt += (unsigned)roundUp(compiler->lvaLclSize(varNum)) / sizeof(int);
+                    initStkLclCnt +=
+                        (unsigned)roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
                 }
             }
             else
@@ -5716,7 +5717,7 @@ void CodeGen::genCheckUseBlockInit()
             {
                 varDsc->lvMustInit = true;
 
-                initStkLclCnt += (unsigned)roundUp(compiler->lvaLclSize(varNum)) / sizeof(int);
+                initStkLclCnt += (unsigned)roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
             }
 
             continue;
@@ -6621,7 +6622,7 @@ void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
 *
 *  Move of relocatable immediate to register
 */
-void CodeGen::genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg)
+void CodeGen::genMov32RelocatableImmediate(emitAttr size, size_t value, regNumber reg)
 {
     _ASSERTE(EA_IS_RELOC(size));
 
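
Note on the roundUp call sites above: they previously relied on the helper's default alignment, and under crossbitness any default derived from host types (e.g. sizeof(size_t), which is 8 on a 64-bit host) no longer matches the 32-bit ARM target's 4-byte pointers, hence the explicit TARGET_POINTER_SIZE. Below is a minimal standalone sketch of that divergence; the roundUp here is an illustrative stand-in rather than the JIT's actual helper, and the sizes in main() assume a 64-bit x64 host targeting 32-bit ARM.

    #include <cstdio>

    // Illustrative stand-in for a round-up helper: rounds 'size' up to a
    // multiple of 'alignment' (alignment must be a power of two).
    static unsigned roundUp(unsigned size, unsigned alignment)
    {
        return (size + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        const unsigned hostPointerSize   = (unsigned)sizeof(void*); // 8 on a 64-bit host
        const unsigned targetPointerSize = 4;                       // 32-bit ARM target

        // A 10-byte local: a host-derived default alignment yields 16 bytes,
        // while the 32-bit target only needs 12.
        printf("host-aligned:   %u\n", roundUp(10, hostPointerSize));
        printf("target-aligned: %u\n", roundUp(10, targetPointerSize));
        return 0;
    }
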
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index ab3f32e548..2b9f3aa94b 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -1436,6 +1436,15 @@ inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
     assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE);
     assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE));
 
+#if defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_)
+    if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT)
+    {
+        // When casting from LONG to INT, we need to force cast of the value,
+        // if the host architecture represents INT and LONG with the same data size.
+        gtLngCon.gtLconVal = (INT64)(INT32)gtLngCon.gtLconVal;
+    }
+#endif // defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_)
+
     SetOperRaw(oper);
 
 #ifdef DEBUG
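
The compiler.hpp hunk guards against stale high bits: on a 64-bit host the INT and LONG constant payloads of a node share 64-bit storage, so re-tagging a GT_CNS_LNG node as GT_CNS_INT for a 32-bit target has to narrow the value first. A self-contained sketch of the effect follows; the ConstPayload union and its field names are illustrative only, not the real GenTree layout.

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in: on a 64-bit host both payloads below are 8 bytes
    // wide and overlap (type punning via union, tolerated by major compilers).
    union ConstPayload
    {
        int64_t  lconVal; // payload while the node is tagged GT_CNS_LNG
        intptr_t iconVal; // payload once the node is re-tagged GT_CNS_INT
    };

    int main()
    {
        ConstPayload c;
        c.lconVal = 0x100000001LL; // a LONG constant with non-zero high bits

        // Re-tagging LNG -> INT without narrowing leaves the high bits behind:
        printf("stale:    0x%llx\n", (unsigned long long)c.iconVal); // 0x100000001

        // The patch forces the value through a 32-bit cast when changing the oper:
        c.lconVal = (int64_t)(int32_t)c.lconVal;
        printf("narrowed: 0x%llx\n", (unsigned long long)c.iconVal); // 0x1
        return 0;
    }
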
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index 5c70e08e37..7a1d106f17 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -5460,8 +5460,8 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst)
             JITDUMP("  section %u, size %u, block absolute addr\n", secNum++, dscSize);
 
             assert(dscSize && dscSize % TARGET_POINTER_SIZE == 0);
-            size_t numElems = dscSize / TARGET_POINTER_SIZE;
-            BYTE** bDst     = (BYTE**)dst;
+            size_t         numElems = dscSize / TARGET_POINTER_SIZE;
+            target_size_t* bDst     = (target_size_t*)dst;
             for (unsigned i = 0; i < numElems; i++)
             {
                 BasicBlock* block = ((BasicBlock**)dsc->dsCont)[i];
@@ -5475,7 +5475,7 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst)
 #ifdef _TARGET_ARM_
                 target = (BYTE*)((size_t)target | 1); // Or in thumb bit
 #endif
-                bDst[i] = target;
+                bDst[i] = (target_size_t)target;
                 if (emitComp->opts.compReloc)
                 {
                     emitRecordRelocation(&(bDst[i]), target, IMAGE_REL_BASED_HIGHLOW);
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index bcf0cb4f33..ca5575a470 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -1673,7 +1673,7 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
 */
 
 void emitter::emitIns_R_I(
-    instruction ins, emitAttr attr, regNumber reg, int imm, insFlags flags /* = INS_FLAGS_DONT_CARE */)
+    instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insFlags flags /* = INS_FLAGS_DONT_CARE */)
 {
     insFormat fmt = IF_NONE;
 
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index de1266ea65..82c37d6bb8 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -4970,7 +4970,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
                 argLcls++;
 
                 // Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100
-                tempMask |= ((((1 << (roundUp(argSize) / REGSIZE_BYTES))) - 1) << lvaTable[preSpillLclNum].lvArgReg);
+                tempMask |= ((((1 << (roundUp(argSize, TARGET_POINTER_SIZE) / REGSIZE_BYTES))) - 1)
+                             << lvaTable[preSpillLclNum].lvArgReg);
                 if (tempMask == preSpillMask)
                 {
                     // We won't encounter more pre-spilled registers,
@@ -7334,7 +7335,7 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData*
         // Change the variable to a TYP_BLK
         if (varType != TYP_BLK)
         {
-            varDsc->lvExactSize = (unsigned)(roundUp(padding + pComp->lvaLclSize(lclNum)));
+            varDsc->lvExactSize = (unsigned)(roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE));
             varDsc->lvType      = TYP_BLK;
             pComp->lvaSetVarAddrExposed(lclNum);
         }
diff --git a/src/jit/target.h b/src/jit/target.h
index 453f8f9e6c..397ecbdb00 100644
--- a/src/jit/target.h
+++ b/src/jit/target.h
@@ -2394,6 +2394,14 @@ C_ASSERT((RBM_INT_CALLEE_SAVED & RBM_FPBASE) == RBM_NONE);
 #endif
 
 /*****************************************************************************/
+
+#ifdef _TARGET_64BIT_
+typedef unsigned __int64 target_size_t;
+#else
+typedef unsigned int target_size_t;
+#endif
+
+C_ASSERT(sizeof(target_size_t) == TARGET_POINTER_SIZE);
+
 /*****************************************************************************/
 #endif // _TARGET_H_
 /*****************************************************************************/
--
cgit v1.2.3
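
Note on target_size_t: the typedef added in target.h gives the JIT an unsigned integer whose width always matches the target's pointer size, which is what emitOutputDataSec now writes into block-address data sections instead of host-sized BYTE* values. A rough standalone sketch of why that matters for a 64-bit-hosted, 32-bit-targeting build; the typedef here assumes a 32-bit target, and blockAddrs/dataSec are made-up stand-ins for the emitter's inputs and output buffer.

    #include <cstdint>
    #include <cstdio>

    // Mirror of the patch's typedef, assuming a 32-bit target such as ARM.
    typedef uint32_t target_size_t;
    static_assert(sizeof(target_size_t) == 4, "expected a 32-bit target pointer size");

    int main()
    {
        // Pretend these are host-side (64-bit) addresses of basic blocks that
        // back a jump table in the data section.
        const uint64_t blockAddrs[3] = { 0x10400, 0x10480, 0x10500 };

        // The emitted table must use the target's pointer width: 3 x 4 bytes.
        alignas(target_size_t) unsigned char dataSec[3 * sizeof(target_size_t)];

        // Writing through BYTE** (host pointers) would advance 8 bytes per entry
        // and produce a table twice as wide as the 32-bit runtime expects.
        target_size_t* slots = (target_size_t*)dataSec;
        for (unsigned i = 0; i < 3; i++)
        {
            slots[i] = (target_size_t)blockAddrs[i]; // truncate to the target width
        }

        printf("jump table size: %u bytes\n", (unsigned)sizeof(dataSec)); // 12
        return 0;
    }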