summaryrefslogtreecommitdiff
path: root/src/jit/importer.cpp
diff options
context:
space:
mode:
authorTanner Gooding <tagoo@outlook.com>2019-02-21 03:09:40 -0800
committerGitHub <noreply@github.com>2019-02-21 03:09:40 -0800
commit4ba958c9fd412b217e21a1dd508ec466a21aa462 (patch)
treeb405b229b15ba500dec8b72e61460e5a7752fc1d /src/jit/importer.cpp
parentfeed8ae9df458f336b2dd76cc9abe934bb2a5751 (diff)
downloadcoreclr-4ba958c9fd412b217e21a1dd508ec466a21aa462.tar.gz
coreclr-4ba958c9fd412b217e21a1dd508ec466a21aa462.tar.bz2
coreclr-4ba958c9fd412b217e21a1dd508ec466a21aa462.zip
Enable the HWIntrinsic extension methods and remove the instance implementations (#22705)
* Removing various S.R.I.Vector instance method APIs, since they should now be exposed as extension methods * Updating the JIT to recognize the S.R.I.Vector extension methods. * Updating various S.R.I.Vector test templates * Regenerating the S.R.I tests that are created from a template. * Fixing the numArgs for Base_Vector256_GetLower * Fixing the handling for `Base_VectorXXX_As` to normalize the struct type. * Adding the Base_Vector128_As intrinsics back for arm64
Diffstat (limited to 'src/jit/importer.cpp')
-rw-r--r--src/jit/importer.cpp758
1 files changed, 31 insertions, 727 deletions
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index e677d37e0b..5d3a82e4af 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -3458,64 +3458,6 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
ni = lookupNamedIntrinsic(method);
#ifdef FEATURE_HW_INTRINSICS
- switch (ni)
- {
-#if defined(_TARGET_ARM64_)
- case NI_Base_Vector64_AsByte:
- case NI_Base_Vector64_AsInt16:
- case NI_Base_Vector64_AsInt32:
- case NI_Base_Vector64_AsSByte:
- case NI_Base_Vector64_AsSingle:
- case NI_Base_Vector64_AsUInt16:
- case NI_Base_Vector64_AsUInt32:
-#endif // _TARGET_ARM64_
- case NI_Base_Vector128_As:
- case NI_Base_Vector128_AsByte:
- case NI_Base_Vector128_AsDouble:
- case NI_Base_Vector128_AsInt16:
- case NI_Base_Vector128_AsInt32:
- case NI_Base_Vector128_AsInt64:
- case NI_Base_Vector128_AsSByte:
- case NI_Base_Vector128_AsSingle:
- case NI_Base_Vector128_AsUInt16:
- case NI_Base_Vector128_AsUInt32:
- case NI_Base_Vector128_AsUInt64:
-#if defined(_TARGET_XARCH_)
- case NI_Base_Vector128_CreateScalarUnsafe:
- case NI_Base_Vector128_GetElement:
- case NI_Base_Vector128_WithElement:
- case NI_Base_Vector128_ToScalar:
- case NI_Base_Vector128_ToVector256:
- case NI_Base_Vector128_ToVector256Unsafe:
- case NI_Base_Vector128_Zero:
- case NI_Base_Vector256_As:
- case NI_Base_Vector256_AsByte:
- case NI_Base_Vector256_AsDouble:
- case NI_Base_Vector256_AsInt16:
- case NI_Base_Vector256_AsInt32:
- case NI_Base_Vector256_AsInt64:
- case NI_Base_Vector256_AsSByte:
- case NI_Base_Vector256_AsSingle:
- case NI_Base_Vector256_AsUInt16:
- case NI_Base_Vector256_AsUInt32:
- case NI_Base_Vector256_AsUInt64:
- case NI_Base_Vector256_CreateScalarUnsafe:
- case NI_Base_Vector256_GetElement:
- case NI_Base_Vector256_WithElement:
- case NI_Base_Vector256_GetLower:
- case NI_Base_Vector256_ToScalar:
- case NI_Base_Vector256_Zero:
-#endif // _TARGET_XARCH_
- {
- return impBaseIntrinsic(ni, clsHnd, method, sig);
- }
-
- default:
- {
- break;
- }
- }
-
if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
{
GenTree* hwintrinsic = impHWIntrinsic(ni, method, sig, mustExpand);
@@ -4171,643 +4113,6 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
return retNode;
}
-#ifdef FEATURE_HW_INTRINSICS
-//------------------------------------------------------------------------
-// impBaseIntrinsic: dispatch intrinsics to their own implementation
-//
-// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- handle for the intrinsic method's class
-// method -- method handle of the intrinsic function.
-// sig -- signature of the intrinsic call
-//
-// Return Value:
-// the expanded intrinsic.
-//
-GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_METHOD_HANDLE method,
- CORINFO_SIG_INFO* sig)
-{
- GenTree* retNode = nullptr;
- GenTree* op1 = nullptr;
-
- if (!featureSIMD)
- {
- return nullptr;
- }
-
- unsigned simdSize = 0;
- var_types baseType = TYP_UNKNOWN;
- var_types retType = JITtype2varType(sig->retType);
-
- if (sig->hasThis())
- {
- baseType = getBaseTypeAndSizeOfSIMDType(clsHnd, &simdSize);
-
- if (retType == TYP_STRUCT)
- {
- unsigned retSimdSize = 0;
- var_types retBasetype = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &retSimdSize);
- if (!varTypeIsArithmetic(retBasetype))
- {
- return nullptr;
- }
- retType = getSIMDTypeForSize(retSimdSize);
- }
- }
- else
- {
- assert(retType == TYP_STRUCT);
- baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &simdSize);
- retType = getSIMDTypeForSize(simdSize);
- }
-
- if (!varTypeIsArithmetic(baseType))
- {
- return nullptr;
- }
-
- switch (intrinsic)
- {
-#if defined(_TARGET_XARCH_)
- case NI_Base_Vector256_As:
- case NI_Base_Vector256_AsByte:
- case NI_Base_Vector256_AsDouble:
- case NI_Base_Vector256_AsInt16:
- case NI_Base_Vector256_AsInt32:
- case NI_Base_Vector256_AsInt64:
- case NI_Base_Vector256_AsSByte:
- case NI_Base_Vector256_AsSingle:
- case NI_Base_Vector256_AsUInt16:
- case NI_Base_Vector256_AsUInt32:
- case NI_Base_Vector256_AsUInt64:
- {
- if (!compSupports(InstructionSet_AVX))
- {
- // We don't want to deal with TYP_SIMD32 if the compiler doesn't otherwise support the type.
- break;
- }
-
- __fallthrough;
- }
-#endif // _TARGET_XARCH_
-
-#if defined(_TARGET_ARM64_)
- case NI_Base_Vector64_AsByte:
- case NI_Base_Vector64_AsInt16:
- case NI_Base_Vector64_AsInt32:
- case NI_Base_Vector64_AsSByte:
- case NI_Base_Vector64_AsSingle:
- case NI_Base_Vector64_AsUInt16:
- case NI_Base_Vector64_AsUInt32:
-#endif // _TARGET_ARM64_
- case NI_Base_Vector128_As:
- case NI_Base_Vector128_AsByte:
- case NI_Base_Vector128_AsDouble:
- case NI_Base_Vector128_AsInt16:
- case NI_Base_Vector128_AsInt32:
- case NI_Base_Vector128_AsInt64:
- case NI_Base_Vector128_AsSByte:
- case NI_Base_Vector128_AsSingle:
- case NI_Base_Vector128_AsUInt16:
- case NI_Base_Vector128_AsUInt32:
- case NI_Base_Vector128_AsUInt64:
- {
- // We fold away the cast here, as it only exists to satisfy
- // the type system. It is safe to do this here since the retNode type
- // and the signature return type are both the same TYP_SIMD.
-
- assert(sig->numArgs == 0);
- assert(sig->hasThis());
-
- retNode = impSIMDPopStack(retType, true, sig->retTypeClass);
- SetOpLclRelatedToSIMDIntrinsic(retNode);
- assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass)));
- break;
- }
-
-#ifdef _TARGET_XARCH_
- case NI_Base_Vector128_CreateScalarUnsafe:
- {
- assert(sig->numArgs == 1);
-
-#ifdef _TARGET_X86_
- if (varTypeIsLong(baseType))
- {
- // TODO-XARCH-CQ: It may be beneficial to emit the movq
- // instruction, which takes a 64-bit memory address and
- // works on 32-bit x86 systems.
- break;
- }
-#endif // _TARGET_X86_
-
- if (compSupports(InstructionSet_SSE2) || (compSupports(InstructionSet_SSE) && (baseType == TYP_FLOAT)))
- {
- op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
- }
- break;
- }
-
- case NI_Base_Vector128_ToScalar:
- {
- assert(sig->numArgs == 0);
- assert(sig->hasThis());
-
- if (compSupports(InstructionSet_SSE) && varTypeIsFloating(baseType))
- {
- op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize), true, clsHnd);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, 16);
- }
- break;
- }
-
- case NI_Base_Vector128_ToVector256:
- case NI_Base_Vector128_ToVector256Unsafe:
- case NI_Base_Vector256_GetLower:
- {
- assert(sig->numArgs == 0);
- assert(sig->hasThis());
-
- if (compSupports(InstructionSet_AVX))
- {
- op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize), true, clsHnd);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
- }
- break;
- }
-
- case NI_Base_Vector128_Zero:
- {
- assert(sig->numArgs == 0);
-
- if (compSupports(InstructionSet_SSE))
- {
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
- }
- break;
- }
-
- case NI_Base_Vector256_CreateScalarUnsafe:
- {
- assert(sig->numArgs == 1);
-
-#ifdef _TARGET_X86_
- if (varTypeIsLong(baseType))
- {
- // TODO-XARCH-CQ: It may be beneficial to emit the movq
- // instruction, which takes a 64-bit memory address and
- // works on 32-bit x86 systems.
- break;
- }
-#endif // _TARGET_X86_
-
- if (compSupports(InstructionSet_AVX))
- {
- op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
- }
- break;
- }
-
- case NI_Base_Vector256_ToScalar:
- {
- assert(sig->numArgs == 0);
- assert(sig->hasThis());
-
- if (compSupports(InstructionSet_AVX) && varTypeIsFloating(baseType))
- {
- op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize), true, clsHnd);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, 32);
- }
- break;
- }
-
- case NI_Base_Vector256_Zero:
- {
- assert(sig->numArgs == 0);
-
- if (compSupports(InstructionSet_AVX))
- {
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
- }
- break;
- }
-
- case NI_Base_Vector256_WithElement:
- {
- if (!compSupports(InstructionSet_AVX))
- {
- // Using software fallback if JIT/hardware don't support AVX instructions and YMM registers
- return nullptr;
- }
- __fallthrough;
- }
- case NI_Base_Vector128_WithElement:
- {
- assert(sig->numArgs == 2);
- GenTree* indexOp = impStackTop(1).val;
- if (!compSupports(InstructionSet_SSE2) || !varTypeIsArithmetic(baseType) || !indexOp->OperIsConst())
- {
- // Using software fallback if
- // 1. JIT/hardware don't support SSE2 instructions
- // 2. baseType is not a numeric type (throw exceptions)
- // 3. index is not a constant
- return nullptr;
- }
-
- switch (baseType)
- {
- // Using software fallback if baseType is not supported by hardware
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_INT:
- case TYP_UINT:
- if (!compSupports(InstructionSet_SSE41))
- {
- return nullptr;
- }
- break;
- case TYP_LONG:
- case TYP_ULONG:
- if (!compSupports(InstructionSet_SSE41_X64))
- {
- return nullptr;
- }
- break;
- case TYP_DOUBLE:
- case TYP_FLOAT:
- case TYP_SHORT:
- case TYP_USHORT:
- // short/ushort/float/double is supported by SSE2
- break;
- default:
- unreached();
- break;
- }
-
- ssize_t imm8 = indexOp->AsIntCon()->IconValue();
- ssize_t cachedImm8 = imm8;
- ssize_t count = simdSize / genTypeSize(baseType);
-
- if (imm8 >= count || imm8 < 0)
- {
- // Using software fallback if index is out of range (throw exception)
- return nullptr;
- }
-
- GenTree* valueOp = impPopStack().val;
- impPopStack();
- GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize), true, clsHnd);
-
- GenTree* clonedVectorOp = nullptr;
-
- if (simdSize == 32)
- {
- // Extract the half vector that will be modified
- assert(compSupports(InstructionSet_AVX));
-
- // copy `vectorOp` to accept the modified half vector
- vectorOp = impCloneExpr(vectorOp, &clonedVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
- nullptr DEBUGARG("Clone Vector for Vector256<T>.WithElement"));
-
- if (imm8 >= count / 2)
- {
- imm8 -= count / 2;
- vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(1), NI_AVX_ExtractVector128,
- baseType, simdSize);
- }
- else
- {
- vectorOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Base_Vector256_GetLower, baseType, simdSize);
- }
- }
-
- GenTree* immNode = gtNewIconNode(imm8);
-
- switch (baseType)
- {
- case TYP_LONG:
- case TYP_ULONG:
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE41_X64_Insert,
- baseType, 16);
- break;
-
- case TYP_FLOAT:
- {
- if (!compSupports(InstructionSet_SSE41))
- {
- // Emulate Vector128<float>.WithElement by SSE instructions
- if (imm8 == 0)
- {
- // vector.WithElement(0, value)
- // =>
- // movss xmm0, xmm1 (xmm0 = vector, xmm1 = value)
- valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp,
- NI_Base_Vector128_CreateScalarUnsafe, TYP_FLOAT, 16);
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, NI_SSE_MoveScalar,
- TYP_FLOAT, 16);
- }
- else if (imm8 == 1)
- {
- // vector.WithElement(1, value)
- // =>
- // shufps xmm1, xmm0, 0 (xmm0 = vector, xmm1 = value)
- // shufps xmm1, xmm0, 226
- GenTree* tmpOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Base_Vector128_CreateScalarUnsafe,
- TYP_FLOAT, 16);
- GenTree* dupVectorOp = nullptr;
- vectorOp = impCloneExpr(vectorOp, &dupVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
- nullptr DEBUGARG("Clone Vector for Vector128<float>.WithElement"));
- tmpOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmpOp, vectorOp, gtNewIconNode(0),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmpOp, dupVectorOp, gtNewIconNode(226),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- }
- else
- {
- ssize_t controlBits1 = 0;
- ssize_t controlBits2 = 0;
- if (imm8 == 2)
- {
- controlBits1 = 48;
- controlBits2 = 132;
- }
- else
- {
- controlBits1 = 32;
- controlBits2 = 36;
- }
- // vector.WithElement(2, value)
- // =>
- // shufps xmm1, xmm0, 48 (xmm0 = vector, xmm1 = value)
- // shufps xmm0, xmm1, 132
- //
- // vector.WithElement(3, value)
- // =>
- // shufps xmm1, xmm0, 32 (xmm0 = vector, xmm1 = value)
- // shufps xmm0, xmm1, 36
- GenTree* tmpOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Base_Vector128_CreateScalarUnsafe,
- TYP_FLOAT, 16);
- GenTree* dupVectorOp = nullptr;
- vectorOp = impCloneExpr(vectorOp, &dupVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
- nullptr DEBUGARG("Clone Vector for Vector128<float>.WithElement"));
- valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, tmpOp, gtNewIconNode(controlBits1),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, dupVectorOp, gtNewIconNode(controlBits2),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- }
- break;
- }
- else
- {
- valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Base_Vector128_CreateScalarUnsafe,
- TYP_FLOAT, 16);
- immNode->AsIntCon()->SetIconValue(imm8 * 16);
- __fallthrough;
- }
- }
-
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_INT:
- case TYP_UINT:
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE41_Insert, baseType, 16);
- break;
-
- case TYP_SHORT:
- case TYP_USHORT:
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE2_Insert, baseType, 16);
- break;
-
- case TYP_DOUBLE:
- {
- // vector.WithElement(0, value)
- // =>
- // movsd xmm0, xmm1 (xmm0 = vector, xmm1 = value)
- //
- // vector.WithElement(1, value)
- // =>
- // unpcklpd xmm0, xmm1 (xmm0 = vector, xmm1 = value)
- valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Base_Vector128_CreateScalarUnsafe,
- TYP_DOUBLE, 16);
- NamedIntrinsic in = (imm8 == 0) ? NI_SSE2_MoveScalar : NI_SSE2_UnpackLow;
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, in, TYP_DOUBLE, 16);
- break;
- }
-
- default:
- unreached();
- break;
- }
-
- if (simdSize == 32)
- {
- assert(clonedVectorOp);
- int upperOrLower = (cachedImm8 >= count / 2) ? 1 : 0;
- retNode = gtNewSimdHWIntrinsicNode(retType, clonedVectorOp, retNode, gtNewIconNode(upperOrLower),
- NI_AVX_InsertVector128, baseType, simdSize);
- }
-
- break;
- }
-
- case NI_Base_Vector256_GetElement:
- {
- if (!compSupports(InstructionSet_AVX))
- {
- // Using software fallback if JIT/hardware don't support AVX instructions and YMM registers
- return nullptr;
- }
- __fallthrough;
- }
- case NI_Base_Vector128_GetElement:
- {
- assert(sig->numArgs == 1);
- GenTree* indexOp = impStackTop().val;
- if (!compSupports(InstructionSet_SSE2) || !varTypeIsArithmetic(baseType) || !indexOp->OperIsConst())
- {
- // Using software fallback if
- // 1. JIT/hardware don't support SSE2 instructions
- // 2. baseType is not a numeric type (throw exceptions)
- // 3. index is not a constant
- return nullptr;
- }
-
- switch (baseType)
- {
- // Using software fallback if baseType is not supported by hardware
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_INT:
- case TYP_UINT:
- if (!compSupports(InstructionSet_SSE41))
- {
- return nullptr;
- }
- break;
- case TYP_LONG:
- case TYP_ULONG:
- if (!compSupports(InstructionSet_SSE41_X64))
- {
- return nullptr;
- }
- break;
- case TYP_DOUBLE:
- case TYP_FLOAT:
- case TYP_SHORT:
- case TYP_USHORT:
- // short/ushort/float/double is supported by SSE2
- break;
- default:
- break;
- }
-
- ssize_t imm8 = indexOp->AsIntCon()->IconValue();
- ssize_t count = simdSize / genTypeSize(baseType);
-
- if (imm8 >= count || imm8 < 0)
- {
- // Using software fallback if index is out of range (throw exception)
- return nullptr;
- }
-
- impPopStack();
- GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize), true, clsHnd);
- NamedIntrinsic resIntrinsic = NI_Illegal;
-
- if (simdSize == 32)
- {
- assert(compSupports(InstructionSet_AVX));
- if (imm8 >= count / 2)
- {
- imm8 -= count / 2;
- vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(1), NI_AVX_ExtractVector128,
- baseType, simdSize);
- }
- else
- {
- vectorOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Base_Vector256_GetLower, baseType, simdSize);
- }
- }
-
- if (imm8 == 0 && (genTypeSize(baseType) >= 4))
- {
- switch (baseType)
- {
- case TYP_LONG:
- resIntrinsic = NI_SSE2_X64_ConvertToInt64;
- break;
- case TYP_ULONG:
- resIntrinsic = NI_SSE2_X64_ConvertToUInt64;
- break;
- case TYP_INT:
- resIntrinsic = NI_SSE2_ConvertToInt32;
- break;
- case TYP_UINT:
- resIntrinsic = NI_SSE2_ConvertToUInt32;
- break;
- case TYP_FLOAT:
- case TYP_DOUBLE:
- resIntrinsic = NI_Base_Vector128_ToScalar;
- break;
- default:
- unreached();
- }
- return gtNewSimdHWIntrinsicNode(retType, vectorOp, resIntrinsic, baseType, 16);
- }
-
- GenTree* immNode = gtNewIconNode(imm8);
-
- switch (baseType)
- {
- case TYP_LONG:
- case TYP_ULONG:
- retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_X64_Extract, baseType, 16);
- break;
-
- case TYP_FLOAT:
- {
- if (!compSupports(InstructionSet_SSE41))
- {
- assert(imm8 >= 1);
- assert(imm8 <= 3);
- // Emulate Vector128<float>.GetElement(i) by SSE instructions
- // vector.GetElement(i)
- // =>
- // shufps xmm0, xmm0, control
- // (xmm0 = vector, control = i + 228)
- immNode->AsIntCon()->SetIconValue(228 + imm8);
- GenTree* clonedVectorOp = nullptr;
- vectorOp = impCloneExpr(vectorOp, &clonedVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
- nullptr DEBUGARG("Clone Vector for Vector128<float>.GetElement"));
- vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, clonedVectorOp, immNode,
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- return gtNewSimdHWIntrinsicNode(retType, vectorOp, NI_Base_Vector128_ToScalar, TYP_FLOAT, 16);
- }
- __fallthrough;
- }
- case TYP_UBYTE:
- case TYP_INT:
- case TYP_UINT:
- retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_Extract, baseType, 16);
- break;
-
- case TYP_BYTE:
- // We do not have SSE41/SSE2 Extract APIs on signed small int, so need a CAST on the result
- retNode = gtNewSimdHWIntrinsicNode(TYP_UBYTE, vectorOp, immNode, NI_SSE41_Extract, TYP_UBYTE, 16);
- retNode = gtNewCastNode(TYP_INT, retNode, true, TYP_BYTE);
- break;
-
- case TYP_SHORT:
- case TYP_USHORT:
- // We do not have SSE41/SSE2 Extract APIs on signed small int, so need a CAST on the result
- retNode = gtNewSimdHWIntrinsicNode(TYP_USHORT, vectorOp, immNode, NI_SSE2_Extract, TYP_USHORT, 16);
- if (baseType == TYP_SHORT)
- {
- retNode = gtNewCastNode(TYP_INT, retNode, true, TYP_SHORT);
- }
- break;
-
- case TYP_DOUBLE:
- assert(imm8 == 1);
- // vector.GetElement(1)
- // =>
- // pshufd xmm1, xmm0, 0xEE (xmm0 = vector)
- vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(0xEE), NI_SSE2_Shuffle,
- TYP_INT, 16);
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_DOUBLE, vectorOp, NI_Base_Vector128_ToScalar, TYP_DOUBLE, 16);
- break;
-
- default:
- unreached();
- }
-
- break;
- }
-
-#endif // _TARGET_XARCH_
-
- default:
- {
- unreached();
- break;
- }
- }
-
- return retNode;
-}
-#endif // FEATURE_HW_INTRINSICS
-
GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
@@ -4979,7 +4284,7 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
className += 2;
- if (strcmp(className, "`1") == 0)
+ if (className[0] == '\0')
{
if (strncmp(methodName, "As", 2) == 0)
{
@@ -5026,18 +4331,8 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
className += 3;
-#if defined(_TARGET_XARCH_)
if (className[0] == '\0')
{
- if (strcmp(methodName, "CreateScalarUnsafe") == 0)
- {
- result = NI_Base_Vector128_CreateScalarUnsafe;
- }
- }
- else
-#endif // _TARGET_XARCH_
- if (strcmp(className, "`1") == 0)
- {
if (strncmp(methodName, "As", 2) == 0)
{
methodName += 2;
@@ -5088,17 +4383,13 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
}
}
#if defined(_TARGET_XARCH_)
- else if (strcmp(methodName, "GetElement") == 0)
- {
- result = NI_Base_Vector128_GetElement;
- }
- else if (strcmp(methodName, "WithElement") == 0)
+ else if (strcmp(methodName, "CreateScalarUnsafe") == 0)
{
- result = NI_Base_Vector128_WithElement;
+ result = NI_Base_Vector128_CreateScalarUnsafe;
}
- else if (strcmp(methodName, "get_Zero") == 0)
+ else if (strcmp(methodName, "GetElement") == 0)
{
- result = NI_Base_Vector128_Zero;
+ result = NI_Base_Vector128_GetElement;
}
else if (strncmp(methodName, "To", 2) == 0)
{
@@ -5122,8 +4413,21 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
}
}
}
+ else if (strcmp(methodName, "WithElement") == 0)
+ {
+ result = NI_Base_Vector128_WithElement;
+ }
#endif // _TARGET_XARCH_
}
+#if defined(_TARGET_XARCH_)
+ else if (strcmp(className, "`1") == 0)
+ {
+ if (strcmp(methodName, "get_Zero") == 0)
+ {
+ result = NI_Base_Vector128_Zero;
+ }
+ }
+#endif // _TARGET_XARCH_
}
#if defined(_TARGET_XARCH_)
else if (strncmp(className, "256", 3) == 0)
@@ -5132,13 +4436,6 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
if (className[0] == '\0')
{
- if (strcmp(methodName, "CreateScalarUnsafe") == 0)
- {
- result = NI_Base_Vector256_CreateScalarUnsafe;
- }
- }
- else if (strcmp(className, "`1") == 0)
- {
if (strncmp(methodName, "As", 2) == 0)
{
methodName += 2;
@@ -5188,25 +4485,32 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
result = NI_Base_Vector256_AsUInt64;
}
}
- else if (strcmp(methodName, "get_Zero") == 0)
+ else if (strcmp(methodName, "CreateScalarUnsafe") == 0)
{
- result = NI_Base_Vector256_Zero;
+ result = NI_Base_Vector256_CreateScalarUnsafe;
+ }
+ else if (strcmp(methodName, "GetElement") == 0)
+ {
+ result = NI_Base_Vector256_GetElement;
}
else if (strcmp(methodName, "GetLower") == 0)
{
result = NI_Base_Vector256_GetLower;
}
- else if (strcmp(methodName, "GetElement") == 0)
+ else if (strcmp(methodName, "ToScalar") == 0)
{
- result = NI_Base_Vector256_GetElement;
+ result = NI_Base_Vector256_ToScalar;
}
else if (strcmp(methodName, "WithElement") == 0)
{
result = NI_Base_Vector256_WithElement;
}
- else if (strcmp(methodName, "ToScalar") == 0)
+ }
+ else if (strcmp(className, "`1") == 0)
+ {
+ if (strcmp(methodName, "get_Zero") == 0)
{
- result = NI_Base_Vector256_ToScalar;
+ result = NI_Base_Vector256_Zero;
}
}
}