summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCarol Eidt <carol.eidt@microsoft.com>2018-03-15 08:21:25 -0700
committerCarol Eidt <carol.eidt@microsoft.com>2018-04-17 16:15:14 -0700
commit32480530dbcd0936e4292bdd50e5a94002f8dba2 (patch)
tree333c33b54dce6addf25ec10419e7eb8ed742e05f
parent963e5a9f08ed7c664036ef9b7050374b66f71d64 (diff)
downloadcoreclr-32480530dbcd0936e4292bdd50e5a94002f8dba2.tar.gz
coreclr-32480530dbcd0936e4292bdd50e5a94002f8dba2.tar.bz2
coreclr-32480530dbcd0936e4292bdd50e5a94002f8dba2.zip
Unix/x64 ABI cleanup
Eliminate `FEATURE_UNIX_AMD64_STRUCT_PASSING` and replace it with `UNIX_AMD64_ABI` when used alone. Both are currently defined; it is highly unlikely the latter will work alone; and it significantly clutters up the code, especially the JIT. Also, fix the altjit support (now `UNIX_AMD64_ABI_ITF`) to *not* call `ClassifyEightBytes` if the struct is too large. Otherwise it asserts.
-rw-r--r--clrdefinitions.cmake5
-rw-r--r--src/debug/daccess/nidump.cpp4
-rw-r--r--src/jit/CMakeLists.txt2
-rw-r--r--src/jit/armelnonjit/CMakeLists.txt1
-rw-r--r--src/jit/codegen.h4
-rw-r--r--src/jit/codegencommon.cpp66
-rw-r--r--src/jit/codegenxarch.cpp44
-rw-r--r--src/jit/compiler.cpp14
-rw-r--r--src/jit/compiler.h29
-rw-r--r--src/jit/compiler.hpp4
-rw-r--r--src/jit/ee_il_dll.cpp8
-rw-r--r--src/jit/emit.cpp4
-rw-r--r--src/jit/emitxarch.cpp4
-rw-r--r--src/jit/gentree.cpp16
-rw-r--r--src/jit/gentree.h6
-rw-r--r--src/jit/importer.cpp34
-rw-r--r--src/jit/jit.h26
-rw-r--r--src/jit/lclvars.cpp58
-rw-r--r--src/jit/linuxnonjit/CMakeLists.txt1
-rw-r--r--src/jit/lower.cpp16
-rw-r--r--src/jit/lsra.h4
-rw-r--r--src/jit/lsrabuild.cpp8
-rw-r--r--src/jit/lsraxarch.cpp8
-rw-r--r--src/jit/morph.cpp173
-rw-r--r--src/jit/protononjit/CMakeLists.txt1
-rw-r--r--src/jit/scopeinfo.cpp4
-rw-r--r--src/jit/target.h10
-rw-r--r--src/vm/amd64/calldescrworkeramd64.S4
-rw-r--r--src/vm/amd64/cgenamd64.cpp6
-rw-r--r--src/vm/argdestination.h6
-rw-r--r--src/vm/callhelpers.cpp8
-rw-r--r--src/vm/callingconvention.h58
-rw-r--r--src/vm/class.h22
-rw-r--r--src/vm/class.inl4
-rw-r--r--src/vm/comdelegate.cpp26
-rw-r--r--src/vm/fcall.h6
-rw-r--r--src/vm/jitinterface.cpp24
-rw-r--r--src/vm/method.cpp4
-rw-r--r--src/vm/methodtable.cpp4
-rw-r--r--src/vm/methodtable.h26
-rw-r--r--src/vm/methodtablebuilder.cpp16
-rw-r--r--src/vm/methodtablebuilder.h4
-rw-r--r--src/vm/object.cpp10
-rw-r--r--src/vm/siginfo.cpp4
-rw-r--r--src/vm/threads.h2
-rw-r--r--src/vm/threadsuspend.cpp8
46 files changed, 393 insertions, 403 deletions
diff --git a/clrdefinitions.cmake b/clrdefinitions.cmake
index d7583809a2..06a2df17e4 100644
--- a/clrdefinitions.cmake
+++ b/clrdefinitions.cmake
@@ -200,13 +200,14 @@ add_definitions(-DFEATURE_SYMDIFF)
add_definitions(-DFEATURE_TIERED_COMPILATION)
if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
# Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit.
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+ add_definitions(-DUNIX_AMD64_ABI_ITF)
endif (CLR_CMAKE_PLATFORM_ARCH_AMD64)
if(CLR_CMAKE_PLATFORM_UNIX_AMD64)
add_definitions(-DFEATURE_MULTIREG_RETURN)
+ add_definitions(-DUNIX_AMD64_ABI)
endif (CLR_CMAKE_PLATFORM_UNIX_AMD64)
if(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
+ add_definitions(-DUNIX_AMD64_ABI)
endif(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
add_definitions(-DFEATURE_USE_ASM_GC_WRITE_BARRIERS)
if(CLR_CMAKE_PLATFORM_ARCH_AMD64 OR (CLR_CMAKE_PLATFORM_ARCH_ARM64 AND NOT WIN32))
diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
index e948687c0e..0cd03ae944 100644
--- a/src/debug/daccess/nidump.cpp
+++ b/src/debug/daccess/nidump.cpp
@@ -5530,9 +5530,9 @@ NativeImageDumper::EnumMnemonics s_MTFlagsLow[] =
#if defined(FEATURE_HFA)
MTFLAG_ENTRY(IsHFA),
#endif // FEATURE_HFA
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
MTFLAG_ENTRY(IsRegStructPassed),
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
MTFLAG_ENTRY(IsByRefLike),
MTFLAG_ENTRY(UNUSED_ComponentSize_5),
MTFLAG_ENTRY(UNUSED_ComponentSize_6),
diff --git a/src/jit/CMakeLists.txt b/src/jit/CMakeLists.txt
index 8d87336948..ed25156eb1 100644
--- a/src/jit/CMakeLists.txt
+++ b/src/jit/CMakeLists.txt
@@ -394,7 +394,7 @@ endif ()
if ((CLR_CMAKE_PLATFORM_ARCH_I386 OR CLR_CMAKE_PLATFORM_ARCH_AMD64) AND WIN32)
# On Windows, build altjit that targets the Linux ABI:
# On x86, build Linux/x86 altjit. This enables UNIX_X86_ABI.
- # On amd64, build Linux/AMD64 altjit. This enables UNIX_AMD64_ABI and FEATURE_UNIX_AMD64_STRUCT_PASSING.
+ # On amd64, build Linux/AMD64 altjit. This enables UNIX_AMD64_ABI.
add_subdirectory(linuxnonjit)
endif ()
diff --git a/src/jit/armelnonjit/CMakeLists.txt b/src/jit/armelnonjit/CMakeLists.txt
index 9ee4b8ffc0..6bfde4c8fa 100644
--- a/src/jit/armelnonjit/CMakeLists.txt
+++ b/src/jit/armelnonjit/CMakeLists.txt
@@ -32,7 +32,6 @@ if (NOT WIN32)
remove_definitions(-DUNIX_X86_ABI)
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
remove_definitions(-DUNIX_AMD64_ABI)
- remove_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
else()
clr_unknown_arch()
endif()
diff --git a/src/jit/codegen.h b/src/jit/codegen.h
index f38b4dc13a..d67ad1dca7 100644
--- a/src/jit/codegen.h
+++ b/src/jit/codegen.h
@@ -306,9 +306,9 @@ protected:
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
void genEnregisterIncomingStackArgs();
void genCheckUseBlockInit();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(_TARGET_ARM64_)
bool genInstrWithConstant(instruction ins,
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 99902dc7bc..f17d2a804d 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -4302,9 +4302,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
struct regArgElem
{
unsigned varNum; // index into compiler->lvaTable[] for this register argument
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
var_types type; // the Jit type of this regArgTab entry
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
// That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
// argument register number 'x'. Only used when circular = true.
@@ -4315,7 +4315,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
bool processed; // true after we've processed the argument (and it is in its final location)
bool circular; // true if this register participates in a circular dependency loop.
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// For UNIX AMD64 struct passing, the type of the register argument slot can differ from
// the type of the lclVar in ways that are not ascertainable from lvType.
@@ -4326,7 +4326,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
return type; // UNIX_AMD64 implementation
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// In other cases, we simply use the type of the lclVar to determine the type of the register.
var_types getRegType(Compiler* compiler)
@@ -4340,7 +4340,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
return varDsc.lvType;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
} regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {};
unsigned varNum;
@@ -4403,9 +4403,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
regType = varDsc->GetHfaType();
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (!varTypeIsStruct(regType))
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// A struct might be passed partially in XMM register for System V calls.
// So a single arg might use both register files.
@@ -4417,7 +4417,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
int slots = 0;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
@@ -4502,7 +4502,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
regArgNum = firstRegSlot;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// Bingo - add it to our table
regArgNum = genMapRegNumToRegArgNum(varDsc->lvArgReg, regType);
@@ -4512,10 +4512,10 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
// register)
noway_assert(regArgTab[regArgNum].slot == 0);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Set the register type.
regArgTab[regArgNum].type = regType;
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = 1;
@@ -4584,14 +4584,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
regType = regArgTab[regArgNum + i].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(regArgNum + i, regType);
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
// lvArgReg could be INT or FLOAT reg. So the following assertion doesn't hold.
// The type of the register depends on the classification of the first eightbyte
// of the struct. For information on classification refer to the System V x86_64 ABI at:
// http://www.x86-64.org/documentation/abi.pdf
assert((i > 0) || (regNum == varDsc->lvArgReg));
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
// Is the arg dead on entry to the method ?
if ((regArgMaskLive & genRegMask(regNum)) == 0)
@@ -4810,7 +4810,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
{
emitAttr size;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// If this is the wrong register file, just continue.
if (regArgTab[argNum].type == TYP_UNDEF)
{
@@ -4819,7 +4819,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
// The next register file processing will process it.
continue;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// If the arg is dead on entry to the method, skip it
if (regArgTab[argNum].processed)
@@ -4887,9 +4887,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
// Must be <= MAX_PASS_MULTIREG_BYTES or else it wouldn't be passed in registers
noway_assert(varDsc->lvSize() <= MAX_PASS_MULTIREG_BYTES);
#endif // FEATURE_MULTIREG_ARGS
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
storeType = regArgTab[argNum].type;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (varDsc->lvIsHfaRegArg())
{
#ifdef _TARGET_ARM_
@@ -4926,13 +4926,13 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
getEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// Check if we are writing past the end of the struct
if (varTypeIsStruct(varDsc))
{
assert(varDsc->lvSize() >= baseOffset + (unsigned)size);
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (regArgTab[argNum].slot == 1)
{
@@ -4965,7 +4965,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
if (doingFloat)
{
-#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
insCopy = ins_Copy(TYP_DOUBLE);
// Compute xtraReg here when we have a float argument
assert(xtraReg == REG_NA);
@@ -4976,9 +4976,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
#if defined(FEATURE_HFA)
fpAvailMask &= RBM_ALLDOUBLE;
#else
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
#error Error. Wrong architecture.
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
#endif // defined(FEATURE_HFA)
if (fpAvailMask == RBM_NONE)
@@ -4987,9 +4987,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
#if defined(FEATURE_HFA)
fpAvailMask &= RBM_ALLDOUBLE;
#else
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
#error Error. Wrong architecture.
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
#endif // defined(FEATURE_HFA)
}
@@ -5251,7 +5251,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
var_types regType = regArgTab[argNum].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (regType == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
@@ -5260,7 +5260,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
regArgMaskLive &= ~genRegMask(regNum);
continue;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
#ifndef _TARGET_64BIT_
@@ -5348,7 +5348,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
destRegNum = REG_NEXT(varDsc->lvRegNum);
}
#endif // !_TARGET_64BIT_
-#if (defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
+#if (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
@@ -5359,7 +5359,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
noway_assert(regNum != destRegNum);
continue;
}
-#endif // (defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
+#endif // (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
noway_assert(destRegNum != REG_NA);
if (destRegNum != regNum)
{
@@ -5415,7 +5415,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
argRegCount = 2;
}
#endif
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
if (varTypeIsStruct(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
argRegCount = 2;
@@ -5429,7 +5429,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
// but mark argNum as processed and clear regNum from the live mask.
destRegNum = regNum;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#if defined(_TARGET_ARM64_) && defined(FEATURE_SIMD)
if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
@@ -9421,7 +9421,7 @@ void CodeGen::genFnProlog()
getEmitter()->emitMarkPrologEnd();
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
// The unused bits of Vector3 arguments must be cleared
// since native compiler doesn't initize the upper bits to zeros.
//
@@ -9429,7 +9429,7 @@ void CodeGen::genFnProlog()
// genFnPrologCalleeRegArgs() for argument registers and
// genEnregisterIncomingStackArgs() for stack arguments.
genClearStackVec3ArgUpperBits();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+#endif // UNIX_AMD64_ABI && FEATURE_SIMD
/*-----------------------------------------------------------------------------
* Take care of register arguments first
@@ -11591,7 +11591,7 @@ instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shi
//
unsigned CodeGen::getFirstArgWithStackSlot()
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARMARCH_)
+#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARMARCH_)
unsigned baseVarNum = 0;
#if defined(FEATURE_UNIX_AMR64_STRUCT_PASSING)
baseVarNum = compiler->lvaFirstStackIncomingArgNum;
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index a6dc3b57f5..e677923863 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -1065,12 +1065,12 @@ bool CodeGen::isStructReturn(GenTree* treeNode)
return false;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
return varTypeIsStruct(treeNode);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
assert(!varTypeIsStruct(treeNode));
return false;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
//------------------------------------------------------------------------
@@ -1089,7 +1089,7 @@ void CodeGen::genStructReturn(GenTree* treeNode)
assert(treeNode->OperGet() == GT_RETURN);
GenTree* op1 = treeNode->gtGetOp1();
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
@@ -1963,7 +1963,7 @@ void CodeGen::genMultiRegCallStoreToLocal(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Structs of size >=9 and <=16 are returned in two return registers on x64 Unix.
assert(varTypeIsStruct(treeNode));
@@ -2121,9 +2121,9 @@ void CodeGen::genMultiRegCallStoreToLocal(GenTree* treeNode)
}
varDsc->lvRegNum = REG_STK;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
+#else // !UNIX_AMD64_ABI && !_TARGET_X86_
assert(!"Unreached");
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
+#endif // !UNIX_AMD64_ABI && !_TARGET_X86_
}
//------------------------------------------------------------------------
@@ -3208,7 +3208,7 @@ void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
// must be cleared to zeroes. The native compiler doesn't clear the upper bits
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of Vector argument on stack are always cleared to zero.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
@@ -3252,7 +3252,7 @@ void CodeGen::genClearStackVec3ArgUpperBits()
}
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
// Generate code for CpObj nodes wich copy structs that have interleaved
@@ -4972,7 +4972,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
continue;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
@@ -5009,7 +5009,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
}
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->regNum;
genConsumeReg(argNode);
@@ -5033,7 +5033,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
#endif // FEATURE_VARARG
}
-#if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
ssize_t stackArgBytes = 0;
@@ -5065,7 +5065,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
}
args = args->gtOp.gtOp2;
}
-#endif // defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
@@ -5623,7 +5623,7 @@ void CodeGen::genJmpMethod(GenTree* jmp)
continue;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
@@ -5669,7 +5669,7 @@ void CodeGen::genJmpMethod(GenTree* jmp)
}
}
else
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
@@ -7442,12 +7442,12 @@ unsigned CodeGen::getBaseVarForPutArgStk(GenTree* treeNode)
LclVarDsc* varDsc = &(compiler->lvaTable[baseVarNum]);
assert(varDsc != nullptr);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#endif // !DEBUG
}
else
@@ -7890,7 +7890,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (varTypeIsStruct(targetType))
{
@@ -7900,7 +7900,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
@@ -7947,9 +7947,9 @@ void CodeGen::genPutArgReg(GenTreeOp* tree)
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 0719857564..8f43bcd086 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -765,7 +765,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
}
assert(structSize > 0);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// An 8-byte struct may need to be passed in a floating point register
// So we always consult the struct "Classifier" routine
@@ -829,7 +829,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
else // Not an HFA struct type
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The case of (structDesc.eightByteCount == 1) should have already been handled
if (structDesc.eightByteCount > 1)
@@ -982,7 +982,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
}
assert(structSize > 0);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// An 8-byte struct may need to be returned in a floating point register
// So we always consult the struct "Classifier" routine
@@ -1016,7 +1016,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef _TARGET_64BIT_
// Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled
@@ -1064,7 +1064,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
else // Not an HFA struct type
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The case of (structDesc.eightByteCount == 1) should have already been handled
if (structDesc.eightByteCount > 1)
@@ -6962,7 +6962,7 @@ START:
return result;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// GetTypeFromClassificationAndSizes:
// Returns the type of the eightbyte accounting for the classification and size of the eightbyte.
@@ -7151,7 +7151,7 @@ void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
GetStructTypeOffset(structDesc, type0, type1, offset0, offset1);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
/*****************************************************************************/
/*****************************************************************************/
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index ebaf58e3c6..ea29fd4c58 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -1204,13 +1204,13 @@ struct FuncInfoDsc
struct fgArgTabEntry
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
fgArgTabEntry()
{
otherRegNum = REG_NA;
isStruct = false; // is this a struct arg
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
GenTree* node; // Initially points at the Op1 field of 'parent', but if the argument is replaced with an GT_ASG or
// placeholder
@@ -1242,7 +1242,7 @@ struct fgArgTabEntry
bool isNonStandard : 1; // True if it is an arg that is passed in a reg other than a standard arg reg, or is forced
// to be on the stack despite its arg list position.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
bool isStruct : 1; // True if this is a struct arg
regNumber otherRegNum; // The (second) register to use when passing this argument.
@@ -1337,7 +1337,7 @@ public:
fgArgTabEntry* AddRegArg(
unsigned argNum, GenTree* node, GenTree* parent, regNumber regNum, unsigned numRegs, unsigned alignment);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTree* parent,
@@ -1347,13 +1347,13 @@ public:
const bool isStruct,
const regNumber otherRegNum = REG_NA,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTree* parent,
unsigned numSlots,
- unsigned alignment FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct));
+ unsigned alignment UNIX_AMD64_ABI_ONLY_ARG(const bool isStruct));
void RemorphReset();
fgArgTabEntry* RemorphRegArg(
@@ -2433,10 +2433,10 @@ public:
unsigned short lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Only for AMD64 System V cache the first caller stack homed argument.
unsigned lvaFirstStackIncomingArgNum; // First argument with stack slot in the caller.
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
@@ -4586,8 +4586,7 @@ public:
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
- GenTree* fgMakeTmpArgNode(
- unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters));
+ GenTree* fgMakeTmpArgNode(unsigned tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(const bool passedInRegisters));
// The following check for loops that don't execute calls
bool fgLoopCallMarked;
@@ -4925,7 +4924,7 @@ private:
void fgMakeOutgoingStructArgCopy(GenTreeCall* call,
GenTree* args,
unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
+ CORINFO_CLASS_HANDLE copyBlkClass UNIX_AMD64_ABI_ONLY_ARG(
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structDescPtr));
void fgFixupStructReturn(GenTree* call);
@@ -7018,7 +7017,7 @@ public:
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
@@ -7026,7 +7025,7 @@ public:
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
@@ -9726,7 +9725,7 @@ public:
static HelperCallProperties s_helperCallProperties;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
@@ -9744,7 +9743,7 @@ public:
unsigned __int8* offset1);
void fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgument);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index d2a11db234..d77173c15a 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -2390,10 +2390,10 @@ inline
if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame)
{
#ifdef _TARGET_AMD64_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// On amd64, every param has a stack location, except on Unix-like systems.
assert(varDsc->lvIsParam);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#elif !defined(LEGACY_BACKEND)
// For !LEGACY_BACKEND on other targets, a stack parameter that is enregistered or prespilled
// for profiling on ARM will have a stack location.
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index f7ea585948..b1321bcc25 100644
--- a/src/jit/ee_il_dll.cpp
+++ b/src/jit/ee_il_dll.cpp
@@ -431,7 +431,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
// to accommodate irregular sized structs, they are passed byref
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
CORINFO_CLASS_HANDLE argClass;
CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
var_types argType = JITtype2varType(argTypeJit);
@@ -440,7 +440,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
unsigned structSize = info.compCompHnd->getClassSize(argClass);
return structSize; // TODO: roundUp() needed here?
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return TARGET_POINTER_SIZE;
#else // !_TARGET_AMD64_
@@ -1158,7 +1158,7 @@ int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field)
* ICorStaticInfo wrapper functions
*/
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
void Compiler::dumpSystemVClassificationType(SystemVClassificationType ct)
@@ -1224,7 +1224,7 @@ void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(
#endif // DEBUG
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
{
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index 32524ea5ed..487e849011 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -5843,9 +5843,9 @@ void emitter::emitRecordGCcall(BYTE* codePos, unsigned char callInstrSize)
call->cdByrefRegs = (regMaskSmall)emitThisByrefRegs;
#if EMIT_TRACK_STACK_DEPTH
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
noway_assert(FitsIn<USHORT>(emitCurStackLvl / ((unsigned)sizeof(unsigned))));
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#endif
// Append the call descriptor to the list */
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 3d6342ee1b..5a0bdd31f3 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -11975,7 +11975,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
byrefRegs |= RBM_EAX;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// If is a multi-register return method is called, mark RDX appropriately (for System V AMD64).
if (id->idIsLargeCall())
{
@@ -11989,7 +11989,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
byrefRegs |= RBM_RDX;
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// If the GC register set has changed, report the new set
if (gcrefRegs != emitThisGCrefRegs)
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index d3234036e3..61ccb66888 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -288,13 +288,13 @@ void GenTree::InitNodeSize()
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
-#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
-#endif // defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
@@ -5423,7 +5423,7 @@ bool GenTree::IsAddWithI32Const(GenTree** addr, int* offset)
// When FEATURE_MULTIREG_ARGS is defined we can get here with GT_OBJ tree.
// This happens when we have a struct that is passed in multiple registers.
//
-// Also note that when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined the GT_LDOBJ
+// Also note that when UNIX_AMD64_ABI is defined the GT_LDOBJ
// later gets converted to a GT_FIELD_LIST with two GT_LCL_FLDs in Lower/LowerXArch.
//
@@ -12242,12 +12242,12 @@ void Compiler::gtGetLateArgMsg(
if (curArgTabEntry->numRegs >= 2)
{
regNumber otherRegNum;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
assert(curArgTabEntry->numRegs == 2);
otherRegNum = curArgTabEntry->otherRegNum;
#else
otherRegNum = (regNumber)(((unsigned)curArgTabEntry->regNum) + curArgTabEntry->numRegs - 1);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (listCount == -1)
{
@@ -18401,7 +18401,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
{
assert(varTypeIsStruct(returnType));
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
@@ -18432,7 +18432,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
@@ -18508,7 +18508,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
regNumber resultReg = REG_NA;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index 8b9198fd13..a35e75a6c8 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -5667,7 +5667,7 @@ struct GenTreeCopyOrReload : public GenTreeUnOp
{
assert(OperGet() == from->OperGet());
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
@@ -5978,7 +5978,7 @@ inline bool GenTree::IsValidCallArgument()
#else // we have RyuJIT backend and FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// For UNIX ABI we currently only allow a GT_FIELD_LIST of GT_LCL_FLDs nodes
GenTree* gtListPtr = this;
while (gtListPtr != nullptr)
@@ -5997,7 +5997,7 @@ inline bool GenTree::IsValidCallArgument()
}
gtListPtr = gtListPtr->MoveNext();
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Note that for non-UNIX ABI the GT_FIELD_LIST may contain any node
//
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 182a049820..6383ba51b3 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -1090,7 +1090,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
GenTree* dest = nullptr;
unsigned destFlags = 0;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
// TODO-ARM-BUG: Does ARM need this?
// TODO-ARM64-BUG: Does ARM64 need this?
@@ -1099,7 +1099,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src));
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
@@ -1107,7 +1107,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
src->gtOper == GT_COMMA ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
if (destAddr->OperGet() == GT_ADDR)
{
GenTree* destNode = destAddr->gtGetOp1();
@@ -1187,7 +1187,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
// but that method has not been updadted to include ARM.
impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
lcl->gtFlags |= GTF_DONT_CSE;
-#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(UNIX_AMD64_ABI)
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
@@ -8473,7 +8473,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
retTypeDesc->InitializeStructReturnType(this, retClsHnd);
#endif // FEATURE_MULTIREG_RET
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
@@ -8513,7 +8513,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
}
-#else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // not UNIX_AMD64_ABI
// Check for TYP_STRUCT type that wraps a primitive type
// Such structs are returned using a single register
@@ -8562,7 +8562,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
#endif // FEATURE_MULTIREG_RET
}
-#endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // not UNIX_AMD64_ABI
return call;
}
@@ -8580,7 +8580,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
#if defined(_TARGET_XARCH_)
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// No VarArgs for CoreCLR on x64 Unix
assert(!info.compIsVarArgs);
@@ -8610,9 +8610,9 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
return impAssignMultiRegTypeToVar(op, retClsHnd);
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
assert(info.compRetNativeType != TYP_STRUCT);
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
@@ -12536,14 +12536,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if (varTypeIsStruct(op1))
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
op1->AsCall()->gtCallType == CT_HELPER)
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
}
@@ -15984,7 +15984,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
(unsigned)CHECK_SPILL_ALL);
}
-#if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
#if defined(_TARGET_ARM_)
// TODO-ARM64-NYI: HFA
// TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
@@ -15992,7 +15992,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
if (IsHfa(retClsHnd))
{
// Same as !IsHfa but just don't bother with impAssignStructPtr.
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
@@ -16005,7 +16005,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
if (fgNeedReturnSpillTemp())
{
@@ -16013,11 +16013,11 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
{
#if defined(_TARGET_ARM_)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(UNIX_AMD64_ABI)
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
}
}
else
diff --git a/src/jit/jit.h b/src/jit/jit.h
index 1f28ae44ad..b077c4d8f8 100644
--- a/src/jit/jit.h
+++ b/src/jit/jit.h
@@ -264,22 +264,22 @@
#define INDEBUG_LDISASM_COMMA(x)
#endif
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x) , x
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x) x
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x)
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND))
+#if defined(UNIX_AMD64_ABI)
+#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
+#define UNIX_AMD64_ABI_ONLY(x) x
+#else // !defined(UNIX_AMD64_ABI)
+#define UNIX_AMD64_ABI_ONLY_ARG(x)
+#define UNIX_AMD64_ABI_ONLY(x)
+#endif // defined(UNIX_AMD64_ABI)
+
+#if defined(UNIX_AMD64_ABI) || (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND))
#define FEATURE_PUT_STRUCT_ARG_STK 1
#define PUT_STRUCT_ARG_STK_ONLY_ARG(x) , x
#define PUT_STRUCT_ARG_STK_ONLY(x) x
-#else // !(defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
+#else // !(defined(UNIX_AMD64_ABI)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
#define PUT_STRUCT_ARG_STK_ONLY_ARG(x)
#define PUT_STRUCT_ARG_STK_ONLY(x)
-#endif // !(defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
+#endif // !(defined(UNIX_AMD64_ABI)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
#if defined(UNIX_AMD64_ABI)
#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
@@ -293,11 +293,11 @@
#define MULTIREG_HAS_SECOND_GC_RET 1
#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x
#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
#define MULTIREG_HAS_SECOND_GC_RET 0
#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x)
#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x)
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// To get rid of warning 4701 : local variable may be used without being initialized
#define DUMMY_INIT(x) (x)
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 765a2eee3f..d5ed2a5ddc 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -79,9 +79,9 @@ void Compiler::lvaInit()
lvaSIMDInitTempVarNum = BAD_VAR_NUM;
#endif // FEATURE_SIMD
lvaCurEpoch = 0;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
lvaFirstStackIncomingArgNum = BAD_VAR_NUM;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
/*****************************************************************************/
@@ -686,7 +686,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
}
}
#else // !_TARGET_ARM_
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
if (varTypeIsStruct(argType))
{
@@ -725,7 +725,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
}
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#endif // !_TARGET_ARM_
// The final home for this incoming register might be our local stack frame.
@@ -734,13 +734,13 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
bool canPassArgInRegisters = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argType))
{
canPassArgInRegisters = structDesc.passedInRegisters;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister);
}
@@ -758,7 +758,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
varDsc->lvOtherArgReg = REG_NA;
#endif // FEATURE_MULTIREG_ARGS
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
unsigned secondAllocatedRegArgNum = 0;
var_types firstEightByteType = TYP_UNDEF;
var_types secondEightByteType = TYP_UNDEF;
@@ -772,7 +772,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots);
}
@@ -791,7 +791,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
#if FEATURE_MULTIREG_ARGS
if (varTypeIsStruct(argType))
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
varDsc->lvArgReg = genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType);
// If there is a second eightbyte, get a register for it too and map the arg to the reg number.
@@ -815,7 +815,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
varDsc->addPrefReg(genRegMask(varDsc->lvOtherArgReg), this);
}
#endif // _TARGET_ARM64_
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
}
else
#endif // FEATURE_MULTIREG_ARGS
@@ -838,7 +838,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
{
printf("Arg #%u passed in register(s) ", varDscInfo->varNum);
bool isFloat = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// In case of one eightbyte struct the type is already normalized earlier.
// The varTypeIsFloating(argType) is good for this case.
if (varTypeIsStruct(argType) && (structDesc.eightByteCount >= 1))
@@ -846,13 +846,13 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
isFloat = varTypeIsFloating(firstEightByteType);
}
else
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else
{
isFloat = varTypeIsFloating(argType);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !UNIX_AMD64_ABI
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argType))
{
// Print both registers, just to be clear
@@ -879,7 +879,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->lvArgReg, argType);
@@ -952,14 +952,14 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
#endif // FEATURE_FASTTAILCALL
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The arg size is returning the number of bytes of the argument. For a struct it could return a size not a
// multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE,
// so round it up.
compArgSize += (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
compArgSize += argSize;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (info.compIsVarArgs || isHfaArg || isSoftFPPreSpill)
{
#if defined(_TARGET_X86_)
@@ -2239,7 +2239,7 @@ bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc)
return true;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)
+#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)
if (howToPassStruct == SPK_ByValue)
{
assert(type == TYP_STRUCT);
@@ -3500,7 +3500,7 @@ const size_t LclVarDsc::lvArgStackSize() const
#if defined(WINDOWS_AMD64_ABI)
// Structs are either passed by reference or can be passed by value using one pointer
stackSize = TARGET_POINTER_SIZE;
-#elif defined(_TARGET_ARM64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI)
// lvSize performs a roundup.
stackSize = this->lvSize();
@@ -3513,12 +3513,12 @@ const size_t LclVarDsc::lvArgStackSize() const
}
#endif // defined(_TARGET_ARM64_)
-#else // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI
NYI("Unsupported target.");
unreached();
-#endif // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI
}
else
{
@@ -3537,12 +3537,12 @@ var_types LclVarDsc::lvaArgType()
var_types type = TypeGet();
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (type == TYP_STRUCT)
{
NYI("lvaArgType");
}
-#else //! FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else //! UNIX_AMD64_ABI
if (type == TYP_STRUCT)
{
switch (lvExactSize)
@@ -3581,7 +3581,7 @@ var_types LclVarDsc::lvaArgType()
break;
}
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if (type == TYP_STRUCT)
{
@@ -3815,10 +3815,10 @@ void Compiler::lvaMarkLclRefs(GenTree* tree)
#endif // ASSERTION_PROP
bool allowStructs = false;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On System V the type of the var could be a struct type.
allowStructs = varTypeIsStruct(varDsc);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
/* Variables must be used as the same type throughout the method */
noway_assert(tiVerificationNeeded || varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
@@ -5012,11 +5012,11 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
{
unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On the stack frame the homed arg always takes a full number of slots
// for proper stack alignment. Make sure the real struct size is properly rounded up.
argumentSize = (unsigned)roundUp(argumentSize, TARGET_POINTER_SIZE);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
argOffs =
lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
diff --git a/src/jit/linuxnonjit/CMakeLists.txt b/src/jit/linuxnonjit/CMakeLists.txt
index feec36b8af..eb6f6c145c 100644
--- a/src/jit/linuxnonjit/CMakeLists.txt
+++ b/src/jit/linuxnonjit/CMakeLists.txt
@@ -16,7 +16,6 @@ if (CLR_CMAKE_PLATFORM_ARCH_I386)
set(JIT_ARCH_ALTJIT_SOURCES ${JIT_I386_SOURCES})
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-DUNIX_AMD64_ABI)
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
set(JIT_ARCH_ALTJIT_SOURCES ${JIT_AMD64_SOURCES})
else()
clr_unknown_arch()
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index 3f609e70e4..928a4e76fd 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -978,12 +978,12 @@ void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgO
// call, arg, and info must be non-null.
//
// Notes:
-// For System V systems with native struct passing (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined)
+// For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
// this method allocates a single GT_PUTARG_REG for 1 eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
// for two eightbyte structs.
//
// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
-// (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined) this method also sets the GC pointers count and the pointers
+// (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
// layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
// (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
//
@@ -997,7 +997,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf
bool updateArgTable = true;
bool isOnStack = true;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (varTypeIsStruct(type))
{
isOnStack = !info->structDesc.passedInRegisters;
@@ -1006,9 +1006,9 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf
{
isOnStack = info->regNum == REG_STK;
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
isOnStack = info->regNum == REG_STK;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#ifdef _TARGET_ARMARCH_
// Mark contained when we pass struct
@@ -1088,7 +1088,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf
{
if (!isOnStack)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (info->isStruct)
{
// The following code makes sure a register passed struct arg is moved to
@@ -1203,7 +1203,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf
}
}
else
-#else // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // not defined(UNIX_AMD64_ABI)
#if FEATURE_MULTIREG_ARGS
if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
{
@@ -1245,7 +1245,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf
}
else
#endif // FEATURE_MULTIREG_ARGS
-#endif // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // not defined(UNIX_AMD64_ABI)
{
putArg = comp->gtNewPutArgReg(type, arg, info->regNum);
}
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index 22e3ead7ec..6710782e83 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -1086,13 +1086,13 @@ private:
void buildUpperVectorRestoreRefPositions(GenTree* tree, LsraLocation currentLoc, VARSET_VALARG_TP liveLargeVectors);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// For AMD64 on SystemV machines. This method
// is called as replacement for raUpdateRegStateForArg
// that is used on Windows. On System V systems a struct can be passed
// partially using registers from the 2 register files.
void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// Update reg state for an incoming register argument
void updateRegStateForArg(LclVarDsc* argDsc);
diff --git a/src/jit/lsrabuild.cpp b/src/jit/lsrabuild.cpp
index 195d1e8b11..988728da7c 100644
--- a/src/jit/lsrabuild.cpp
+++ b/src/jit/lsrabuild.cpp
@@ -1950,7 +1950,7 @@ void LinearScan::insertZeroInitRefPositions()
}
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems.
//
@@ -1996,7 +1996,7 @@ void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate
@@ -2019,7 +2019,7 @@ void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
//
void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// For System V AMD64 calls the argDsc can have 2 registers (for structs.)
// Handle them here.
if (varTypeIsStruct(argDsc))
@@ -2027,7 +2027,7 @@ void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
unixAmd64UpdateRegStateForArg(argDsc);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
diff --git a/src/jit/lsraxarch.cpp b/src/jit/lsraxarch.cpp
index f72403d0e3..f7bdf80616 100644
--- a/src/jit/lsraxarch.cpp
+++ b/src/jit/lsraxarch.cpp
@@ -1047,7 +1047,7 @@ void LinearScan::BuildCall(GenTreeCall* call)
HandleFloatVarArgs(call, argNode, &callHasFloatRegArgs);
appendLocationInfoToList(argNode);
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
else if (argNode->OperGet() == GT_FIELD_LIST)
{
for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
@@ -1058,7 +1058,7 @@ void LinearScan::BuildCall(GenTreeCall* call)
appendLocationInfoToList(entry->Current());
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef DEBUG
// In DEBUG only, check validity with respect to the arg table entry.
@@ -1085,7 +1085,7 @@ void LinearScan::BuildCall(GenTreeCall* call)
#endif // FEATURE_PUT_STRUCT_ARG_STK
continue;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (argNode->OperGet() == GT_FIELD_LIST)
{
assert(argNode->isContained());
@@ -1101,7 +1101,7 @@ void LinearScan::BuildCall(GenTreeCall* call)
}
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
const regNumber argReg = curArgTabEntry->regNum;
assert(argNode->gtRegNum == argReg);
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 866bd8c7b2..dc6c546736 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -1218,7 +1218,7 @@ fgArgTabEntry* fgArgInfo::AddRegArg(
return curArgTabEntry;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTree* parent,
@@ -1247,26 +1247,26 @@ fgArgTabEntry* fgArgInfo::AddRegArg(unsigned
return curArgTabEntry;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTree* parent,
unsigned numSlots,
- unsigned alignment FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct))
+ unsigned alignment UNIX_AMD64_ABI_ONLY_ARG(const bool isStruct))
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// The node of the ArgTabEntry could change after remorphing - it could be rewritten to a cpyblk or a
// PlaceHolder node (in case of needed late argument, for example.)
// This reqires using of an extra flag. At creation time the state is right, so
// and this assert enforces that.
assert((varTypeIsStruct(node) && isStruct) || (!varTypeIsStruct(node) && !isStruct));
curArgTabEntry->isStruct = isStruct; // is this a struct arg
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->argNum = argNum;
curArgTabEntry->node = node;
@@ -1536,7 +1536,7 @@ void fgArgInfo::ArgsComplete()
#endif
else // we have a register argument, next we look for a struct type.
{
- if (varTypeIsStruct(argx) FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(|| curArgTabEntry->isStruct))
+ if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
@@ -2163,8 +2163,7 @@ void fgArgInfo::Dump(Compiler* compiler)
// Return Value:
// the newly created temp var tree.
-GenTree* Compiler::fgMakeTmpArgNode(
- unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters))
+GenTree* Compiler::fgMakeTmpArgNode(unsigned tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(const bool passedInRegisters))
{
LclVarDsc* varDsc = &lvaTable[tmpVarNum];
assert(varDsc->lvIsTemp);
@@ -2179,11 +2178,11 @@ GenTree* Compiler::fgMakeTmpArgNode(
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || (!defined(LEGACY_BACKEND) && defined(_TARGET_ARM_))
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
arg->gtFlags |= GTF_DONT_CSE;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// Can this type be passed in a single register?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass by reference.
@@ -2198,7 +2197,7 @@ GenTree* Compiler::fgMakeTmpArgNode(
passedInRegisters = true;
type = structBaseType;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
@@ -2209,7 +2208,7 @@ GenTree* Compiler::fgMakeTmpArgNode(
}
else
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// TODO-Cleanup: Fix this - we should never have an address that is TYP_STRUCT.
var_types addrType = type;
#else
@@ -2224,7 +2223,7 @@ GenTree* Compiler::fgMakeTmpArgNode(
if (lvaIsMultiregStruct(varDsc))
{
// ToDo-ARM64: Consider using: arg->ChangeOper(GT_LCL_FLD);
- // as that is how FEATURE_UNIX_AMD64_STRUCT_PASSING works.
+ // as that is how UNIX_AMD64_ABI works.
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
@@ -2309,8 +2308,8 @@ void fgArgInfo::EvalArgsToTemps()
{
// Create a copy of the temp to go into the late argument list
tmpVarNum = curArgTabEntry->tmpNum;
- defArg = compiler->fgMakeTmpArgNode(tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
- argTable[curInx]->structDesc.passedInRegisters));
+ defArg = compiler->fgMakeTmpArgNode(
+ tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(argTable[curInx]->structDesc.passedInRegisters));
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
@@ -2329,7 +2328,7 @@ void fgArgInfo::EvalArgsToTemps()
}
#endif
-#if defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
@@ -2481,13 +2480,13 @@ void fgArgInfo::EvalArgsToTemps()
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
-#if defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
-#else // !defined(_TARGET_AMD64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(defArg))
{
@@ -2516,7 +2515,7 @@ void fgArgInfo::EvalArgsToTemps()
}
}
-#endif // !(defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING))
+#endif // !(defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
@@ -2814,7 +2813,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
bool callIsVararg = call->IsVarargs();
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// If fgMakeOutgoingStructArgCopy is called and copies are generated, hasStackArgCopy is set
// to make sure to call EvalArgsToTemp. fgMakeOutgoingStructArgCopy just marks the argument
// to need a temp variable, and EvalArgsToTemp actually creates the temp variable node.
@@ -3194,10 +3193,10 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
/* this is a register argument - put it in the table */
call->fgArgInfo->AddRegArg(argIndex, argx, nullptr, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
,
false, REG_STK, nullptr
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
);
}
// this can't be a struct.
@@ -3301,9 +3300,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
#endif // _TARGET_ARM_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
bool hasStructArgument = false; // @TODO-ARM64-UNIX: Remove this bool during a future refactoring
// hasMultiregStructArgs is true if there are any structs that are eligible for passing
@@ -3479,7 +3478,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
if (reMorphing)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get the struct description for the already completed struct argument.
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, argx);
assert(fgEntryPtr != nullptr);
@@ -3494,7 +3493,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
structDesc.CopyFrom(fgEntryPtr->structDesc);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
assert(argEntry != nullptr);
if (argEntry->IsBackFilled())
@@ -3531,7 +3530,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
if (argx->IsArgPlaceHolderNode() || (!isStructArg))
{
#if defined(_TARGET_AMD64_)
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
@@ -3547,9 +3546,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
hasMultiregStructArgs = true;
}
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if (isStructArg)
{
@@ -3618,7 +3617,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
isStructArg = true;
}
#ifdef _TARGET_AMD64_
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argx))
{
size = info.compCompHnd->getClassSize(impGetRefAnyClass());
@@ -3627,7 +3626,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
eeGetSystemVAmd64PassStructInRegisterDescriptor(impGetRefAnyClass(), &structDesc);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
size = 1;
}
@@ -3657,9 +3656,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
CORINFO_CLASS_HANDLE objClass = argObj->gtObj.gtClass;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
unsigned originalSize = info.compCompHnd->getClassSize(objClass);
originalSize = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
@@ -3683,26 +3682,26 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
#endif // _TARGET_ARM64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On System V OS-es a struct is never passed by reference.
// It is either passed by value on the stack or in registers.
bool passStructInRegisters = false;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
bool passStructByRef = false;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
// The following if-then-else needs to be carefully refactored.
// Basically the else portion wants to turn a struct load (a GT_OBJ)
// into a GT_IND of the appropriate size.
// It can do this with structs sizes that are 1, 2, 4, or 8 bytes.
- // It can't do this when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined (Why?)
- // TODO-Cleanup: Remove the #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING below.
+ // It can't do this when UNIX_AMD64_ABI is defined (Why?)
+ // TODO-Cleanup: Remove the #ifndef UNIX_AMD64_ABI below.
// It also can't do this if we have a HFA arg,
// unless we have a 1-elem HFA in which case we want to do the optimization.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef _TARGET_X86_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// Check for struct argument with size 1, 2, 4 or 8 bytes
// As we can optimize these by turning them into a GT_IND of the correct type
//
@@ -3718,17 +3717,17 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
!isPow2(originalSize) || // it is not a power of two (1, 2, 4 or 8)
(isHfaArg && (hfaSlots != 1))) // it is a one element HFA struct
#endif // !_TARGET_ARM_
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
// Normalize 'size' to the number of pointer sized items
// 'size' is the number of register slots that we will use to pass the argument
size = roundupSize / TARGET_POINTER_SIZE;
#if defined(_TARGET_AMD64_)
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
copyBlkClass = objClass;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
if (!structDesc.passedInRegisters)
{
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
@@ -3789,7 +3788,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
copyBlkClass = objClass;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if ((size > 2) && !isHfaArg)
{
@@ -3827,7 +3826,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
#endif
#endif // _TARGET_ARM_
}
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// TODO-Amd64-Unix: Since the else part below is disabled for UNIX_AMD64, copies are always
// generated for struct 1, 2, 4, or 8.
else // We have a struct argument with size 1, 2, 4 or 8 bytes
@@ -3953,16 +3952,16 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
#endif
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#endif // not _TARGET_X86_
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (varTypeIsStruct(structBaseType) &&
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
!passStructInRegisters
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
!passStructByRef
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
)
{
if (isHfaArg && passUsingFloatRegs)
@@ -4022,7 +4021,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
//
if (isRegParamType(genActualType(argx->TypeGet()))
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#endif
)
@@ -4100,7 +4099,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
#if defined(UNIX_AMD64_ABI)
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
@@ -4123,7 +4122,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
if (passUsingFloatRegs)
{
@@ -4209,7 +4208,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
if (isRegArg)
{
regNumber nextRegNum = REG_STK;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
@@ -4234,7 +4233,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
@@ -4242,7 +4241,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
#ifdef _TARGET_AMD64_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
@@ -4262,10 +4261,10 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
// This is a register argument - put it in the table
newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, argAlign
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
,
isStructArg, nextOtherRegNum, &structDesc
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
);
newArgEntry->SetIsHfaRegArg(passUsingFloatRegs &&
@@ -4283,14 +4282,14 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
if (passUsingFloatRegs)
{
@@ -4353,18 +4352,16 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
else
{
// This is a stack argument - put it in the table
- call->fgArgInfo->AddStkArg(argIndex, argx, args, size,
- argAlign FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(isStructArg));
+ call->fgArgInfo->AddStkArg(argIndex, argx, args, size, argAlign UNIX_AMD64_ABI_ONLY_ARG(isStructArg));
}
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
noway_assert(!reMorphing);
- fgMakeOutgoingStructArgCopy(call, args, argIndex,
- copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(&structDesc));
+ fgMakeOutgoingStructArgCopy(call, args, argIndex, copyBlkClass UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
hasStackArgCopy = true;
#endif
}
@@ -4453,13 +4450,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
#endif // _TARGET_X86_ && !LEGACY_BACKEND
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (isStructArg && !isRegArg)
{
nonRegPassedStructSlots += size;
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
argSlots += size;
}
@@ -4576,9 +4573,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
// all cases of fgMakeOutgoingStructArgCopy() being called. hasStackArgCopy
// is added to make sure to call EvalArgsToTemp.
if (!reMorphing && (call->fgArgInfo->HasRegArgs()
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
|| hasStackArgCopy
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
))
{
// This is the first time that we morph this call AND it has register arguments.
@@ -4595,12 +4592,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Rewrite the struct args to be passed by value on stack or in registers.
fgMorphSystemVStructArgs(call, hasStructArgument);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
#ifndef LEGACY_BACKEND
// In the future we can migrate UNIX_AMD64 to use this
@@ -4614,7 +4611,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
#endif // LEGACY_BACKEND
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef DEBUG
if (verbose)
@@ -4628,7 +4625,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
#pragma warning(pop)
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// fgMorphSystemVStructArgs:
// Rewrite the struct args to be passed by value on stack or in registers.
//
@@ -4811,7 +4808,7 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
@@ -5484,8 +5481,8 @@ void Compiler::fgMakeOutgoingStructArgCopy(
GenTreeCall* call,
GenTree* args,
unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
- const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
+ CORINFO_CLASS_HANDLE copyBlkClass
+ UNIX_AMD64_ABI_ONLY_ARG(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
{
GenTree* argx = args->Current();
noway_assert(argx->gtOper != GT_MKREFANY);
@@ -5604,7 +5601,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression
- GenTree* arg = fgMakeTmpArgNode(tmp FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(structDescPtr->passedInRegisters));
+ GenTree* arg = fgMakeTmpArgNode(tmp UNIX_AMD64_ABI_ONLY_ARG(structDescPtr->passedInRegisters));
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
@@ -5725,7 +5722,7 @@ void Compiler::fgFixupStructReturn(GenTree* callNode)
assert(call->TypeGet() != TYP_STRUCT);
#endif
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
// If it was a struct return, it has been transformed into a call
// with a return buffer (that returns TYP_VOID) or into a return
// of a primitive/enregisterable type
@@ -7568,7 +7565,7 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
hasMultiByteStackArgs = hasMultiByteStackArgs ||
!VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), objClass, &typeSize, false);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
assert(objClass != nullptr);
@@ -7637,7 +7634,7 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
++calleeArgRegCount;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else
assert(!"Target platform ABI rules regarding passing struct type args in registers");
@@ -8953,7 +8950,7 @@ GenTree* Compiler::fgMorphCall(GenTreeCall* call)
// This is a HFA, use float 0.
callType = TYP_FLOAT;
}
-#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(UNIX_AMD64_ABI)
// Return a dummy node, as the return is already removed.
if (varTypeIsStruct(callType))
{
@@ -18573,7 +18570,7 @@ Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTree* tree, fgWalkData* fg
void Compiler::fgMarkImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
#ifdef DEBUG
if (verbose)
{
@@ -18621,7 +18618,7 @@ void Compiler::fgMarkImplicitByRefArgs()
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
//------------------------------------------------------------------------
@@ -18635,7 +18632,7 @@ void Compiler::fgMarkImplicitByRefArgs()
void Compiler::fgRetypeImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
#ifdef DEBUG
if (verbose)
{
@@ -18809,7 +18806,7 @@ void Compiler::fgRetypeImplicitByRefArgs()
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
//------------------------------------------------------------------------
@@ -18820,7 +18817,7 @@ void Compiler::fgRetypeImplicitByRefArgs()
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
@@ -18884,7 +18881,7 @@ void Compiler::fgMarkDemotedImplicitByRefArgs()
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
/*****************************************************************************
@@ -18894,11 +18891,11 @@ void Compiler::fgMarkDemotedImplicitByRefArgs()
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
-#if (!defined(_TARGET_AMD64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) && !defined(_TARGET_ARM64_)
+#if (!defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)) && !defined(_TARGET_ARM64_)
return false;
-#else // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#else // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
bool changed = false;
@@ -18932,7 +18929,7 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
}
return changed;
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
diff --git a/src/jit/protononjit/CMakeLists.txt b/src/jit/protononjit/CMakeLists.txt
index cb1c42cc43..dc6a595e69 100644
--- a/src/jit/protononjit/CMakeLists.txt
+++ b/src/jit/protononjit/CMakeLists.txt
@@ -30,7 +30,6 @@ if (NOT WIN32)
remove_definitions(-DUNIX_X86_ABI)
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
remove_definitions(-DUNIX_AMD64_ABI)
- remove_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
else()
clr_unknown_arch()
endif()
diff --git a/src/jit/scopeinfo.cpp b/src/jit/scopeinfo.cpp
index 6a1064b0d9..31f66e2c2a 100644
--- a/src/jit/scopeinfo.cpp
+++ b/src/jit/scopeinfo.cpp
@@ -954,7 +954,7 @@ void CodeGen::psiBegProlog()
if (lclVarDsc1->lvIsRegArg)
{
bool isStructHandled = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
if (varTypeIsStruct(lclVarDsc1))
{
@@ -1002,7 +1002,7 @@ void CodeGen::psiBegProlog()
isStructHandled = true;
}
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
if (!isStructHandled)
{
#ifdef DEBUG
diff --git a/src/jit/target.h b/src/jit/target.h
index 09d1cb9a49..15f4693d05 100644
--- a/src/jit/target.h
+++ b/src/jit/target.h
@@ -1018,13 +1018,13 @@ typedef unsigned short regPairNoSmall; // arm: need 12 bits
#define REG_LNGRET REG_EAX
#define RBM_LNGRET RBM_EAX
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#define REG_INTRET_1 REG_RDX
#define RBM_INTRET_1 RBM_RDX
#define REG_LNGRET_1 REG_RDX
#define RBM_LNGRET_1 RBM_RDX
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#define REG_FLOATRET REG_XMM0
@@ -1032,13 +1032,13 @@ typedef unsigned short regPairNoSmall; // arm: need 12 bits
#define REG_DOUBLERET REG_XMM0
#define RBM_DOUBLERET RBM_XMM0
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#define REG_FLOATRET_1 REG_XMM1
#define RBM_FLOATRET_1 RBM_XMM1
#define REG_DOUBLERET_1 REG_XMM1
#define RBM_DOUBLERET_1 RBM_XMM1
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#define REG_FPBASE REG_EBP
#define RBM_FPBASE RBM_EBP
@@ -1132,7 +1132,7 @@ typedef unsigned short regPairNoSmall; // arm: need 12 bits
#define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH
// The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper.
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// See vm\amd64\unixasmhelpers.S for more details.
//
// On Unix a struct of size >=9 and <=16 bytes in size is returned in two return registers.
diff --git a/src/vm/amd64/calldescrworkeramd64.S b/src/vm/amd64/calldescrworkeramd64.S
index 05dd8ac8ef..e91d41dd0e 100644
--- a/src/vm/amd64/calldescrworkeramd64.S
+++ b/src/vm/amd64/calldescrworkeramd64.S
@@ -108,7 +108,7 @@ LOCAL_LABEL(NoFloatArguments):
cmp ecx, 8
je LOCAL_LABEL(ReturnsDouble)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Struct with two integer eightbytes
cmp ecx, 16
jne LOCAL_LABEL(NotTwoIntegerEightbytes)
@@ -138,7 +138,7 @@ LOCAL_LABEL(NotFirstIntegerSecondSSEEightbyte):
jne LOCAL_LABEL(Epilog) // unexpected
movsd real8 ptr [rbx+CallDescrData__returnValue], xmm0
movsd real8 ptr [rbx+CallDescrData__returnValue + 8], xmm1
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
jmp LOCAL_LABEL(Epilog)
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
index 6d11c7f0fa..b770401891 100644
--- a/src/vm/amd64/cgenamd64.cpp
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -333,11 +333,11 @@ void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
pRD->pCurrentContextPointers->Rdi = NULL;
#endif
pRD->pCurrentContextPointers->Rcx = NULL;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
pRD->pCurrentContextPointers->Rdx = (PULONG64)&m_Args->Rdx;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
pRD->pCurrentContextPointers->Rdx = NULL;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
pRD->pCurrentContextPointers->R8 = NULL;
pRD->pCurrentContextPointers->R9 = NULL;
pRD->pCurrentContextPointers->R10 = NULL;
diff --git a/src/vm/argdestination.h b/src/vm/argdestination.h
index 8ab0a5664b..04968a1aff 100644
--- a/src/vm/argdestination.h
+++ b/src/vm/argdestination.h
@@ -28,7 +28,7 @@ public:
m_argLocDescForStructInRegs(argLocDescForStructInRegs)
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
_ASSERTE((argLocDescForStructInRegs != NULL) || (offset != TransitionBlock::StructInRegsOffset));
#elif defined(_TARGET_ARM64_)
// This assert is not interesting on arm64. argLocDescForStructInRegs could be
@@ -85,7 +85,7 @@ public:
#endif // !DACCESS_COMPILE
#endif // defined(_TARGET_ARM64_)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Returns true if the ArgDestination represents a struct passed in registers.
bool IsStructPassedInRegs()
@@ -257,7 +257,7 @@ public:
_ASSERTE(remainingBytes == 0);
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
};
diff --git a/src/vm/callhelpers.cpp b/src/vm/callhelpers.cpp
index c555751c23..55073a8a1e 100644
--- a/src/vm/callhelpers.cpp
+++ b/src/vm/callhelpers.cpp
@@ -420,7 +420,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
#ifdef _DEBUG
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Validate that the return value is not too big for the buffer passed
if (m_pMD->GetMethodTable()->IsRegPassedStruct())
{
@@ -430,7 +430,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
_ASSERTE(cbReturnValue >= thReturnValueType.GetSize());
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// The metasig should be reset
_ASSERTE(m_methodSig.GetArgNum() == 0);
@@ -539,7 +539,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
// We need to pass in a pointer, but be careful of the ARG_SLOT calling convention. We might already have a pointer in the ARG_SLOT.
PVOID pSrc = stackSize > sizeof(ARG_SLOT) ? (LPVOID)ArgSlotToPtr(pArguments[arg]) : (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest.IsStructPassedInRegs())
{
TypeHandle th;
@@ -548,7 +548,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
argDest.CopyStructToRegisters(pSrc, th.AsMethodTable()->GetNumInstanceFieldBytes(), 0);
}
else
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
PVOID pDest = argDest.GetDestinationAddress();
diff --git a/src/vm/callingconvention.h b/src/vm/callingconvention.h
index 9a0abd3b71..a0d7bd4fa0 100644
--- a/src/vm/callingconvention.h
+++ b/src/vm/callingconvention.h
@@ -43,11 +43,11 @@ struct ArgLocDesc
int m_idxStack; // First stack slot used (or -1)
int m_cStack; // Count of stack slots used (or 0)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
EEClass* m_eeClass; // For structs passed in register, it points to the EEClass of the struct
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#if defined(_TARGET_ARM64_)
bool m_isSinglePrecision; // For determining if HFA is single or double
@@ -78,7 +78,7 @@ struct ArgLocDesc
#if defined(_TARGET_ARM64_)
m_isSinglePrecision = FALSE;
#endif // defined(_TARGET_ARM64_)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_eeClass = NULL;
#endif
}
@@ -157,7 +157,7 @@ struct TransitionBlock
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
return offset >= sizeof(TransitionBlock);
#else
int ofsArgRegs = GetOffsetOfArgumentRegisters();
@@ -180,7 +180,7 @@ struct TransitionBlock
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
_ASSERTE(offset != TransitionBlock::StructInRegsOffset);
#endif
return (offset - GetOffsetOfArgumentRegisters()) / TARGET_POINTER_SIZE;
@@ -199,7 +199,7 @@ struct TransitionBlock
static BOOL IsFloatArgumentRegisterOffset(int offset)
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
return (offset != TransitionBlock::StructInRegsOffset) && (offset < 0);
#else
return offset < 0;
@@ -212,7 +212,7 @@ struct TransitionBlock
static BOOL HasFloatRegister(int offset, ArgLocDesc* argLocDescForStructInRegs)
{
LIMITED_METHOD_CONTRACT;
- #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ #if defined(UNIX_AMD64_ABI)
if (offset == TransitionBlock::StructInRegsOffset)
{
return argLocDescForStructInRegs->m_cFloatReg > 0;
@@ -248,7 +248,7 @@ struct TransitionBlock
}
static const int InvalidOffset = -1;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Special offset value to represent struct passed in registers. Such a struct can span both
// general purpose and floating point registers, so it can have two different offsets.
static const int StructInRegsOffset = -2;
@@ -399,7 +399,7 @@ public:
{
LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// No arguments are passed by reference on AMD64 on Unix
return FALSE;
#else
@@ -416,12 +416,12 @@ public:
LIMITED_METHOD_CONTRACT;
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef");
return FALSE;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
return IsArgPassedByRef(size);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else
return (size > ENREGISTERED_PARAMTYPE_MAXSIZE);
@@ -498,7 +498,7 @@ public:
ArgLocDesc* GetArgLocDescForStructInRegs()
{
-#if (defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined (_TARGET_ARM64_)
+#if defined(UNIX_AMD64_ABI) || defined (_TARGET_ARM64_)
return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL;
#else
return NULL;
@@ -603,13 +603,13 @@ public:
{
LIMITED_METHOD_CONTRACT;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (m_hasArgLocDescForStructInRegs)
{
*pLoc = m_argLocDescForStructInRegs;
return;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (argOffset == TransitionBlock::StructInRegsOffset)
{
@@ -650,10 +650,10 @@ protected:
CorElementType m_argType;
int m_argSize;
TypeHandle m_argTypeHandle;
-#if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
ArgLocDesc m_argLocDescForStructInRegs;
bool m_hasArgLocDescForStructInRegs;
-#endif // _TARGET_AMD64_ && UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // (_TARGET_AMD64_ && UNIX_AMD64_ABI) || _TARGET_ARM64_
#ifdef _TARGET_X86_
int m_curOfs; // Current position of the stack iterator
@@ -665,9 +665,7 @@ protected:
int m_idxGenReg; // Next general register to be assigned a value
int m_idxStack; // Next stack slot to be assigned a value
int m_idxFPReg; // Next floating point register to be assigned a value
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
bool m_fArgInRegisters; // Indicates that the current argument is stored in registers
-#endif
#else
int m_curOfs; // Current position of the stack iterator
#endif
@@ -943,7 +941,7 @@ int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
m_argSize = argSize;
m_argTypeHandle = thValueType;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_hasArgLocDescForStructInRegs = false;
#endif
@@ -988,7 +986,7 @@ int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
case ELEMENT_TYPE_VALUETYPE:
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
MethodTable *pMT = m_argTypeHandle.AsMethodTable();
if (pMT->IsRegPassedStruct())
{
@@ -1035,9 +1033,9 @@ int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
cFPRegs = 0;
cGenRegs = 0;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
argSize = sizeof(TADDR);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
break;
}
@@ -1060,7 +1058,7 @@ int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
return argOfs;
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_fArgInRegisters = false;
#endif
@@ -1380,7 +1378,7 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
{
_ASSERTE(!thValueType.IsNull());
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
MethodTable *pMT = thValueType.AsMethodTable();
if (pMT->IsRegPassedStruct())
{
@@ -1412,7 +1410,7 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
break;
}
-#else // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
if (thValueType.IsHFA() && !this->IsVarArg())
@@ -1440,7 +1438,7 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
break;
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
#endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
@@ -1562,7 +1560,7 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk()
int stackElemSize;
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (m_fArgInRegisters)
{
// Arguments passed in registers don't consume any stack
@@ -1570,11 +1568,11 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk()
}
stackElemSize = StackElemSize(GetArgSize());
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
// All stack arguments take just one stack slot on AMD64 because of arguments bigger
// than a stack slot are passed by reference.
stackElemSize = STACK_ELEM_SIZE;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else // _TARGET_AMD64_
stackElemSize = StackElemSize(GetArgSize());
#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
diff --git a/src/vm/class.h b/src/vm/class.h
index 5e349f75b1..7533e9c02c 100644
--- a/src/vm/class.h
+++ b/src/vm/class.h
@@ -408,12 +408,12 @@ class EEClassLayoutInfo
e_ZERO_SIZED = 0x04,
// The size of the struct is explicitly specified in the meta-data.
e_HAS_EXPLICIT_SIZE = 0x08,
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
e_NATIVE_PASS_IN_REGISTERS = 0x10, // Flag wheter a native struct is passed in registers.
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
// HFA type of the unmanaged layout
e_R4_HFA = 0x10,
@@ -510,13 +510,13 @@ class EEClassLayoutInfo
return m_cbPackingSize;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
bool IsNativeStructPassedInRegisters()
{
LIMITED_METHOD_CONTRACT;
return (m_bFlags & e_NATIVE_PASS_IN_REGISTERS) != 0;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
CorElementType GetNativeHFATypeRaw();
#ifdef FEATURE_HFA
@@ -580,13 +580,13 @@ class EEClassLayoutInfo
m_bFlags |= (hfaType == ELEMENT_TYPE_R4) ? e_R4_HFA : e_R8_HFA;
}
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
void SetNativeStructPassedInRegisters()
{
LIMITED_METHOD_CONTRACT;
m_bFlags |= e_NATIVE_PASS_IN_REGISTERS;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
};
@@ -713,14 +713,14 @@ class EEClassOptionalFields
#define MODULE_NON_DYNAMIC_STATICS ((DWORD)-1)
DWORD m_cbModuleDynamicID;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Number of eightBytes in the following arrays
int m_numberEightBytes;
// Classification of the eightBytes
SystemVClassificationType m_eightByteClassifications[CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS];
// Size of data the eightBytes
unsigned int m_eightByteSizes[CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS];
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Set default values for optional fields.
inline void Init();
@@ -1589,7 +1589,7 @@ public:
DWORD GetReliabilityContract();
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get number of eightbytes used by a struct passed in registers.
inline int GetNumberEightBytes()
{
@@ -1626,7 +1626,7 @@ public:
GetOptionalFields()->m_eightByteSizes[i] = eightByteSizes[i];
}
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#if defined(FEATURE_HFA)
bool CheckForHFA(MethodTable ** pByValueClassCache);
diff --git a/src/vm/class.inl b/src/vm/class.inl
index d411f817d2..542f41e682 100644
--- a/src/vm/class.inl
+++ b/src/vm/class.inl
@@ -31,9 +31,9 @@ inline void EEClassOptionalFields::Init()
m_WinRTRedirectedTypeIndex = WinMDAdapter::RedirectedTypeIndex_Invalid;
#endif // FEATURE_COMINTEROP
m_cbModuleDynamicID = MODULE_NON_DYNAMIC_STATICS;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_numberEightBytes = 0;
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
#endif // !DACCESS_COMPILE
diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
index 173a8fe6d1..2927dafca9 100644
--- a/src/vm/comdelegate.cpp
+++ b/src/vm/comdelegate.cpp
@@ -75,7 +75,7 @@ class ShuffleIterator
// Argument location description
ArgLocDesc* m_argLocDesc;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Current eightByte used for struct arguments in registers
int m_currentEightByte;
#endif
@@ -86,7 +86,7 @@ class ShuffleIterator
// Current stack slot index (relative to the ArgLocDesc::m_idxStack)
int m_currentStackSlotIndex;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get next shuffle offset for struct passed in registers. There has to be at least one offset left.
UINT16 GetNextOfsInStruct()
{
@@ -129,7 +129,7 @@ class ShuffleIterator
_ASSERTE(false);
return 0;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
public:
@@ -137,7 +137,7 @@ public:
ShuffleIterator(ArgLocDesc* argLocDesc)
:
m_argLocDesc(argLocDesc),
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_currentEightByte(0),
#endif
m_currentGenRegIndex(0),
@@ -150,7 +150,7 @@ public:
bool HasNextOfs()
{
return (m_currentGenRegIndex < m_argLocDesc->m_cGenReg) ||
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
(m_currentFloatRegIndex < m_argLocDesc->m_cFloatReg) ||
#endif
(m_currentStackSlotIndex < m_argLocDesc->m_cStack);
@@ -161,7 +161,7 @@ public:
{
int index;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Check if the argLocDesc is for a struct in registers
EEClass* eeClass = m_argLocDesc->m_eeClass;
@@ -178,7 +178,7 @@ public:
return (UINT16)index | ShuffleEntry::REGMASK | ShuffleEntry::FPREGMASK;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Shuffle any registers first (the order matters since otherwise we could end up shuffling a stack slot
// over a register we later need to shuffle down as well).
@@ -214,7 +214,7 @@ public:
#endif
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Return an index of argument slot. First indices are reserved for general purpose registers,
// the following ones for float registers and then the rest for stack slots.
// This index is independent of how many registers are actually used to pass arguments.
@@ -238,7 +238,7 @@ int GetNormalizedArgumentSlotIndex(UINT16 offset)
return index;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<ShuffleEntry> * pShuffleEntryArray)
{
@@ -378,9 +378,9 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<S
ArgLocDesc sArgSrc;
ArgLocDesc sArgDst;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
int argSlots = NUM_FLOAT_ARGUMENT_REGISTERS + NUM_ARGUMENT_REGISTERS + sArgPlacerSrc.SizeOfArgStack() / sizeof(size_t);
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// If the target method in non-static (this happens for open instance delegates), we need to account for
// the implicit this parameter.
@@ -454,7 +454,7 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<S
_ASSERTE(!iteratorDst.HasNextOfs());
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// The Unix AMD64 ABI can cause a struct to be passed on stack for the source and in registers for the destination.
// That can cause some arguments that are passed on stack for the destination to be passed in registers in the source.
// An extreme example of that is e.g.:
@@ -494,7 +494,7 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<S
}
}
while (reordered);
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
entry.srcofs = ShuffleEntry::SENTINEL;
entry.dstofs = 0;
diff --git a/src/vm/fcall.h b/src/vm/fcall.h
index c50c6edbd7..e7465bab80 100644
--- a/src/vm/fcall.h
+++ b/src/vm/fcall.h
@@ -1333,14 +1333,8 @@ typedef UINT16 FC_UINT16_RET;
// FC_TypedByRef should be used for TypedReferences in FCall signatures
-#if defined(UNIX_AMD64_ABI) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-// Explicitly pass the TypedReferences by reference
-#define FC_TypedByRef TypedByRef&
-#define FC_DECIMAL DECIMAL&
-#else
#define FC_TypedByRef TypedByRef
#define FC_DECIMAL DECIMAL
-#endif
// The fcall entrypoints has to be at unique addresses. Use this helper macro to make
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 502b5ada13..81f5a81916 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -2256,7 +2256,7 @@ bool CEEInfo::getSystemVAmd64PassStructInRegisterDescriptor(
MODE_PREEMPTIVE;
} CONTRACTL_END;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
JIT_TO_EE_TRANSITION();
_ASSERTE(structPassInRegDescPtr != nullptr);
@@ -2291,27 +2291,31 @@ bool CEEInfo::getSystemVAmd64PassStructInRegisterDescriptor(
}
_ASSERTE(methodTablePtr != nullptr);
- // If we have full support for FEATURE_UNIX_AMD64_STRUCT_PASSING, and not just the interface,
+ // If we have full support for UNIX_AMD64_ABI, and not just the interface,
// then we've cached whether this is a reg passed struct in the MethodTable, computed during
// MethodTable construction. Otherwise, we are just building in the interface, and we haven't
// computed or cached anything, so we need to compute it now.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
bool canPassInRegisters = useNativeLayout ? methodTablePtr->GetLayoutInfo()->IsNativeStructPassedInRegisters()
: methodTablePtr->IsRegPassedStruct();
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
+ bool canPassInRegisters = false;
SystemVStructRegisterPassingHelper helper((unsigned int)th.GetSize());
- bool canPassInRegisters = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ if (th.GetSize() <= CLR_SYSTEMV_MAX_STRUCT_BYTES_TO_PASS_IN_REGISTERS)
+ {
+ canPassInRegisters = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
+ }
+#endif // !defined(UNIX_AMD64_ABI)
if (canPassInRegisters)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SystemVStructRegisterPassingHelper helper((unsigned int)th.GetSize());
bool result = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
// The answer must be true at this point.
_ASSERTE(result);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
structPassInRegDescPtr->passedInRegisters = true;
@@ -2332,9 +2336,9 @@ bool CEEInfo::getSystemVAmd64PassStructInRegisterDescriptor(
EE_TO_JIT_TRANSITION();
return true;
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#else // !defined(UNIX_AMD64_ABI_ITF)
return false;
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // !defined(UNIX_AMD64_ABI_ITF)
}
/*********************************************************************/
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index e1bd021bc7..815f9d217a 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -1286,12 +1286,12 @@ MetaSig::RETURNTYPE MethodDesc::ReturnsObject(
*pMT = pReturnTypeMT;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (pReturnTypeMT->IsRegPassedStruct())
{
return MetaSig::RETVALUETYPE;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (pReturnTypeMT->ContainsPointers())
{
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index 67656235ef..af4b8f7121 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -2219,7 +2219,7 @@ BOOL MethodTable::IsClassPreInited()
//========================================================================================
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
#if defined(_DEBUG) && defined(LOGGING)
static
@@ -3153,7 +3153,7 @@ void MethodTable::AssignClassifiedEightByteTypes(SystemVStructRegisterPassingHe
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
//==========================================================================================
diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
index e88fe16644..edb92f8099 100644
--- a/src/vm/methodtable.h
+++ b/src/vm/methodtable.h
@@ -627,7 +627,7 @@ public:
typedef DPTR(MethodTableWriteableData) PTR_MethodTableWriteableData;
typedef DPTR(MethodTableWriteableData const) PTR_Const_MethodTableWriteableData;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF
+#ifdef UNIX_AMD64_ABI_ITF
inline
SystemVClassificationType CorInfoType2UnixAmd64Classification(CorElementType eeType)
{
@@ -731,7 +731,7 @@ struct SystemVStructRegisterPassingHelper
typedef DPTR(SystemVStructRegisterPassingHelper) SystemVStructRegisterPassingHelperPtr;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF
+#endif // UNIX_AMD64_ABI_ITF
//===============================================================================================
//
@@ -1036,10 +1036,10 @@ public:
// during object construction.
void CheckRunClassInitAsIfConstructingThrowing();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
// Builds the internal data structures and classifies struct eightbytes for Amd System V calling convention.
bool ClassifyEightBytes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
// Copy m_dwFlags from another method table
void CopyFlags(MethodTable * pOldMT)
@@ -1076,12 +1076,12 @@ public:
private:
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
void AssignClassifiedEightByteTypes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel) const;
// Builds the internal data structures and classifies struct eightbytes for Amd System V calling convention.
bool ClassifyEightBytesWithManagedLayout(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
bool ClassifyEightBytesWithNativeLayout(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
DWORD GetClassIndexFromToken(mdTypeDef typeToken)
{
@@ -2088,7 +2088,7 @@ public:
bool IsNativeHFA();
CorElementType GetNativeHFAType();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
inline bool IsRegPassedStruct()
{
LIMITED_METHOD_CONTRACT;
@@ -2100,7 +2100,7 @@ public:
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_IsRegStructPassed);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
#ifdef FEATURE_64BIT_ALIGNMENT
// Returns true iff the native view of this type requires 64-bit aligment.
@@ -3919,18 +3919,18 @@ private:
enum_flag_HasPreciseInitCctors = 0x00000400, // Do we need to run class constructors at allocation time? (Not perf important, could be moved to EEClass
#if defined(FEATURE_HFA)
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#error Can't define both FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING
+#if defined(UNIX_AMD64_ABI)
+#error Can't define both FEATURE_HFA and UNIX_AMD64_ABI
#endif
enum_flag_IsHFA = 0x00000800, // This type is an HFA (Homogenous Floating-point Aggregate)
#endif // FEATURE_HFA
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#if defined(FEATURE_HFA)
-#error Can't define both FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING
+#error Can't define both FEATURE_HFA and UNIX_AMD64_ABI
#endif
enum_flag_IsRegStructPassed = 0x00000800, // This type is a System V register passed struct.
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
enum_flag_IsByRefLike = 0x00001000,
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
index c57677b316..0dc226e333 100644
--- a/src/vm/methodtablebuilder.cpp
+++ b/src/vm/methodtablebuilder.cpp
@@ -1869,23 +1869,23 @@ MethodTableBuilder::BuildMethodTableThrowing(
#ifdef FEATURE_HFA
GetHalfBakedClass()->CheckForHFA(pByValueClassCache);
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
SystemVAmd64CheckForPassStructInRegister();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
if (HasLayout())
{
SystemVAmd64CheckForPassNativeStructInRegister();
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
if (HasLayout())
{
@@ -8165,7 +8165,7 @@ DWORD MethodTableBuilder::GetFieldSize(FieldDesc *pFD)
return (1 << (DWORD)(DWORD_PTR&)(pFD->m_pMTOfEnclosingClass));
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// checks whether the struct is enregisterable.
void MethodTableBuilder::SystemVAmd64CheckForPassStructInRegister()
{
@@ -8250,7 +8250,7 @@ void MethodTableBuilder::StoreEightByteClassification(SystemVStructRegisterPassi
eeClass->SetEightByteClassification(helper->eightByteCount, helper->eightByteClassifications, helper->eightByteSizes);
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
//---------------------------------------------------------------------------------------
//
diff --git a/src/vm/methodtablebuilder.h b/src/vm/methodtablebuilder.h
index 3e267a2b23..3054432c35 100644
--- a/src/vm/methodtablebuilder.h
+++ b/src/vm/methodtablebuilder.h
@@ -2903,14 +2903,14 @@ private:
VOID CheckForNativeHFA();
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// checks whether the struct is enregisterable.
void SystemVAmd64CheckForPassStructInRegister();
void SystemVAmd64CheckForPassNativeStructInRegister();
// Store the eightbyte classification into the EEClass
void StoreEightByteClassification(SystemVStructRegisterPassingHelper* helper);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// this accesses the field size which is temporarily stored in m_pMTOfEnclosingClass
// during class loading. Don't use any other time
diff --git a/src/vm/object.cpp b/src/vm/object.cpp
index 1ff98d11da..bb7c8e713e 100644
--- a/src/vm/object.cpp
+++ b/src/vm/object.cpp
@@ -590,7 +590,7 @@ void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, Meth
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_MODE_COOPERATIVE;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
@@ -606,7 +606,7 @@ void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, Meth
return;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// destOffset is only valid for Nullable<T> passed in registers
_ASSERTE(destOffset == 0);
@@ -621,7 +621,7 @@ void InitValueClassArg(ArgDestination *argDest, MethodTable *pMT)
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_MODE_COOPERATIVE;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
@@ -2050,7 +2050,7 @@ BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, Met
}
CONTRACTL_END;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
// We should only get here if we are unboxing a T as a Nullable<T>
@@ -2088,7 +2088,7 @@ BOOL Nullable::UnBoxIntoArgNoGC(ArgDestination *argDest, OBJECTREF boxedVal, Met
return TRUE;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT);
}
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index 5fe0002cb2..37e2ed5f77 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -4996,13 +4996,13 @@ void ReportPointersFromValueTypeArg(promote_func *fn, ScanContext *sc, PTR_Metho
return;
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (pSrc->IsStructPassedInRegs())
{
pSrc->ReportPointersFromStructInRegisters(fn, sc, pMT->GetNumInstanceFieldBytes());
return;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
ReportPointersFromValueType(fn, sc, pMT, pSrc->GetDestinationAddress());
}
diff --git a/src/vm/threads.h b/src/vm/threads.h
index be7ce6afd5..292ab226b3 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -970,7 +970,7 @@ typedef DWORD (*AppropriateWaitFunc) (void *args, DWORD timeout, DWORD option);
// ex: Windows/Unix ARM/ARM64, Unix-AMD64.
//
//
-// FEATURE_UNIX_AMD64_STRUCT_PASSING is a specific kind of FEATURE_MULTIREG_RETURN
+// UNIX_AMD64_ABI is a specific kind of FEATURE_MULTIREG_RETURN
// [GcInfo v1 and v2] specified by SystemV ABI for AMD64
//
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index 12fbc901a9..405a6675d6 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -6425,7 +6425,7 @@ ReturnKind GetReturnKindFromMethodTable(Thread *pThread, EECodeInfo *codeInfo)
return RT_ByRef;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The Multi-reg return case using the classhandle is only implemented for AMD64 SystemV ABI.
// On other platforms, multi-reg return is not supported with GcInfo v1.
// So, the relevant information must be obtained from the GcInfo tables (which requires version2).
@@ -6452,7 +6452,7 @@ ReturnKind GetReturnKindFromMethodTable(Thread *pThread, EECodeInfo *codeInfo)
ReturnKind structReturnKind = GetStructReturnKind(regKinds[0], regKinds[1]);
return structReturnKind;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return RT_Scalar;
}
@@ -6468,10 +6468,10 @@ ReturnKind GetReturnKind(Thread *pThread, EECodeInfo *codeInfo)
}
else
{
-#if !defined(FEATURE_MULTIREG_RETURN) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(FEATURE_MULTIREG_RETURN) || defined(UNIX_AMD64_ABI)
// For ARM64 struct-return, GetReturnKindFromMethodTable() is not supported
_ASSERTE(returnKind == GetReturnKindFromMethodTable(pThread, codeInfo));
-#endif // !FEATURE_MULTIREG_RETURN || FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !FEATURE_MULTIREG_RETURN || UNIX_AMD64_ABI
}
_ASSERTE(IsValidReturnKind(returnKind));