summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTanner Gooding <tagoo@outlook.com>2018-01-29 16:01:35 -0800
committerTanner Gooding <tagoo@outlook.com>2018-01-29 16:01:35 -0800
commita56f02b94f538396f82d1a815a97eb2091e9c761 (patch)
tree943a3242da9f06e6c717f3a76f0b8e946ef9b09e
parentb3eba0cde4960c5aae6102e655e9eb319e1b75fd (diff)
downloadcoreclr-a56f02b94f538396f82d1a815a97eb2091e9c761.tar.gz
coreclr-a56f02b94f538396f82d1a815a97eb2091e9c761.tar.bz2
coreclr-a56f02b94f538396f82d1a815a97eb2091e9c761.zip
Moving the listed instruction for the HWIntrinsics to its own line
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.PlatformNotSupported.cs735
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.cs735
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.PlatformNotSupported.cs1074
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.cs1074
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Bmi2.cs2
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs96
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs96
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.PlatformNotSupported.cs285
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.cs285
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.PlatformNotSupported.cs978
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.cs978
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.PlatformNotSupported.cs30
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.cs30
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.PlatformNotSupported.cs297
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.cs297
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.PlatformNotSupported.cs207
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.cs207
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.PlatformNotSupported.cs48
-rw-r--r--src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.cs48
19 files changed, 5001 insertions, 2501 deletions
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.PlatformNotSupported.cs
index 7a622da668..7037e6e870 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.PlatformNotSupported.cs
@@ -16,637 +16,791 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m256 _mm256_add_ps (__m256 a, __m256 b); VADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_add_ps (__m256 a, __m256 b);
+ /// VADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Add(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_add_pd (__m256d a, __m256d b); VADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_add_pd (__m256d a, __m256d b);
+ /// VADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Add(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_addsub_ps (__m256 a, __m256 b); VADDSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_addsub_ps (__m256 a, __m256 b);
+ /// VADDSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> AddSubtract(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_addsub_pd (__m256d a, __m256d b); VADDSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_addsub_pd (__m256d a, __m256d b);
+ /// VADDSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> AddSubtract(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_and_ps (__m256 a, __m256 b); VANDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_and_ps (__m256 a, __m256 b);
+ /// VANDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> And(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_and_pd (__m256d a, __m256d b); VANDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_and_pd (__m256d a, __m256d b);
+ /// VANDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> And(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_andnot_ps (__m256 a, __m256 b); VANDNPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_andnot_ps (__m256 a, __m256 b);
+ /// VANDNPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> AndNot(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_andnot_pd (__m256d a, __m256d b); VANDNPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_andnot_pd (__m256d a, __m256d b);
+ /// VANDNPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> AndNot(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8); VBLENDPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8);
+ /// VBLENDPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Blend(Vector256<float> left, Vector256<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_blend_pd (__m256d a, __m256d b, const int imm8); VBLENDPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_blend_pd (__m256d a, __m256d b, const int imm8);
+ /// VBLENDPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Blend(Vector256<double> left, Vector256<double> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_blendv_ps (__m256 a, __m256 b, __m256 mask); VBLENDVPS ymm, ymm, ymm/m256, ymm
+ /// __m256 _mm256_blendv_ps (__m256 a, __m256 b, __m256 mask);
+ /// VBLENDVPS ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<float> BlendVariable(Vector256<float> left, Vector256<float> right, Vector256<float> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask); VBLENDVPD ymm, ymm, ymm/m256, ymm
+ /// __m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask);
+ /// VBLENDVPD ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<double> BlendVariable(Vector256<double> left, Vector256<double> right, Vector256<double> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_broadcast_ss (float const * mem_addr); VBROADCASTSS xmm, m32
+ /// __m128 _mm_broadcast_ss (float const * mem_addr);
+ /// VBROADCASTSS xmm, m32
/// </summary>
public static Vector128<float> BroadcastElementToVector128(float* source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_broadcast_ss (float const * mem_addr); VBROADCASTSS ymm, m32
+ /// __m256 _mm256_broadcast_ss (float const * mem_addr);
+ /// VBROADCASTSS ymm, m32
/// </summary>
public static Vector256<float> BroadcastElementToVector256(float* source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_broadcast_sd (double const * mem_addr); VBROADCASTSD ymm, m64
+ /// __m256d _mm256_broadcast_sd (double const * mem_addr);
+ /// VBROADCASTSD ymm, m64
/// </summary>
public static Vector256<double> BroadcastElementToVector256(double* source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_broadcast_ps (__m128 const * mem_addr); VBROADCASTF128, ymm, m128
+ /// __m256 _mm256_broadcast_ps (__m128 const * mem_addr);
+ /// VBROADCASTF128 ymm, m128
/// </summary>
public static unsafe Vector256<float> BroadcastVector128ToVector256(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_broadcast_pd (__m128d const * mem_addr); VBROADCASTF128, ymm, m128
+ /// __m256d _mm256_broadcast_pd (__m128d const * mem_addr);
+ /// VBROADCASTF128 ymm, m128
/// </summary>
public static unsafe Vector256<double> BroadcastVector128ToVector256(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_ceil_ps (__m256 a); VROUNDPS ymm, ymm/m256, imm8(10)
+ /// __m256 _mm256_ceil_ps (__m256 a);
+ /// VROUNDPS ymm, ymm/m256, imm8(10)
/// </summary>
public static Vector256<float> Ceiling(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_ceil_pd (__m256d a); VROUNDPD ymm, ymm/m256, imm8(10)
+ /// __m256d _mm256_ceil_pd (__m256d a);
+ /// VROUNDPD ymm, ymm/m256, imm8(10)
/// </summary>
public static Vector256<double> Ceiling(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmp_ps (__m128 a, __m128 b, const int imm8); VCMPPS xmm, xmm, xmm/m128, imm8
+ /// __m128 _mm_cmp_ps (__m128 a, __m128 b, const int imm8);
+ /// VCMPPS xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Compare(Vector128<float> left, Vector128<float> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmp_pd (__m128d a, __m128d b, const int imm8); VCMPPD xmm, xmm, xmm/m128, imm8
+ /// __m128d _mm_cmp_pd (__m128d a, __m128d b, const int imm8);
+ /// VCMPPD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Compare(Vector128<double> left, Vector128<double> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_cmp_ps (__m256 a, __m256 b, const int imm8); VCMPPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_cmp_ps (__m256 a, __m256 b, const int imm8);
+ /// VCMPPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Compare(Vector256<float> left, Vector256<float> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8); VCMPPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8);
+ /// VCMPPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Compare(Vector256<double> left, Vector256<double> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmp_sd (__m128d a, __m128d b, const int imm8); VCMPSS xmm, xmm, xmm/m32, imm8
+ /// __m128d _mm_cmp_sd (__m128d a, __m128d b, const int imm8);
+ /// VCMPSD xmm, xmm, xmm/m64, imm8
/// </summary>
public static Vector128<double> CompareScalar(Vector128<double> left, Vector128<double> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmp_ss (__m128 a, __m128 b, const int imm8); VCMPSD xmm, xmm, xmm/m64, imm8
+ /// __m128 _mm_cmp_ss (__m128 a, __m128 b, const int imm8);
+ /// VCMPSS xmm, xmm, xmm/m32, imm8
/// </summary>
public static Vector128<float> CompareScalar(Vector128<float> left, Vector128<float> right, FloatComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// float _mm256_cvtss_f32 (__m256 a); HELPER: VMOVSS
+ /// float _mm256_cvtss_f32 (__m256 a);
+ /// HELPER: VMOVSS
/// </summary>
public static float ConvertToSingle(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_cvtpd_epi32 (__m256d a); VCVTPD2DQ xmm, ymm/m256
+ /// __m128i _mm256_cvtpd_epi32 (__m256d a);
+ /// VCVTPD2DQ xmm, ymm/m256
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm256_cvtpd_ps (__m256d a); VCVTPD2PS xmm, ymm/m256
+ /// __m128 _mm256_cvtpd_ps (__m256d a);
+ /// VCVTPD2PS xmm, ymm/m256
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtps_epi32 (__m256 a); VCVTPS2DQ ymm, ymm/m256
+ /// __m256i _mm256_cvtps_epi32 (__m256 a);
+ /// VCVTPS2DQ ymm, ymm/m256
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_cvtepi32_ps (__m256i a); VCVTDQ2PS ymm, ymm/m256
+ /// __m256 _mm256_cvtepi32_ps (__m256i a);
+ /// VCVTDQ2PS ymm, ymm/m256
/// </summary>
public static Vector256<float> ConvertToVector256Single(Vector256<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_cvtps_pd (__m128 a); VCVTPS2PD ymm, xmm/m128
+ /// __m256d _mm256_cvtps_pd (__m128 a);
+ /// VCVTPS2PD ymm, xmm/m128
/// </summary>
public static Vector256<double> ConvertToVector256Double(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_cvtepi32_pd (__m128i a); VCVTDQ2PD ymm, xmm/m128
+ /// __m256d _mm256_cvtepi32_pd (__m128i a);
+ /// VCVTDQ2PD ymm, xmm/m128
/// </summary>
public static Vector256<double> ConvertToVector256Double(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_cvttpd_epi32 (__m256d a); VCVTTPD2DQ xmm, ymm/m256
+ /// __m128i _mm256_cvttpd_epi32 (__m256d a);
+ /// VCVTTPD2DQ xmm, ymm/m256
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvttps_epi32 (__m256 a); VCVTTPS2DQ ymm, ymm/m256
+ /// __m256i _mm256_cvttps_epi32 (__m256 a);
+ /// VCVTTPS2DQ ymm, ymm/m256
/// </summary>
public static Vector256<int> ConvertToVector256Int32WithTruncation(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_div_ps (__m256 a, __m256 b); VDIVPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_div_ps (__m256 a, __m256 b);
+ /// VDIVPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Divide(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_div_pd (__m256d a, __m256d b); VDIVPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_div_pd (__m256d a, __m256d b);
+ /// VDIVPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Divide(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_dp_ps (__m256 a, __m256 b, const int imm8); VDPPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_dp_ps (__m256 a, __m256 b, const int imm8);
+ /// VDPPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> DotProduct(Vector256<float> left, Vector256<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_moveldup_ps (__m256 a); VMOVSLDUP ymm, ymm/m256
+ /// __m256 _mm256_moveldup_ps (__m256 a);
+ /// VMOVSLDUP ymm, ymm/m256
/// </summary>
public static Vector256<float> DuplicateEvenIndexed(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_movedup_pd (__m256d a); VMOVDDUP ymm, ymm/m256
+ /// __m256d _mm256_movedup_pd (__m256d a);
+ /// VMOVDDUP ymm, ymm/m256
/// </summary>
public static Vector256<double> DuplicateEvenIndexed(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_movehdup_ps (__m256 a); VMOVSHDUP ymm, ymm/m256
+ /// __m256 _mm256_movehdup_ps (__m256 a);
+ /// VMOVSHDUP ymm, ymm/m256
/// </summary>
public static Vector256<float> DuplicateOddIndexed(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int8 _mm256_extract_epi8 (__m256i a, const int index); HELPER
+ /// __int8 _mm256_extract_epi8 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static sbyte Extract(Vector256<sbyte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int8 _mm256_extract_epi8 (__m256i a, const int index); HELPER
+ /// __int8 _mm256_extract_epi8 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static byte Extract(Vector256<byte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int16 _mm256_extract_epi16 (__m256i a, const int index); HELPER
+ /// __int16 _mm256_extract_epi16 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static short Extract(Vector256<short> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int16 _mm256_extract_epi16 (__m256i a, const int index); HELPER
+ /// __int16 _mm256_extract_epi16 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static ushort Extract(Vector256<ushort> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int32 _mm256_extract_epi32 (__m256i a, const int index); HELPER
+ /// __int32 _mm256_extract_epi32 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static int Extract(Vector256<int> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int32 _mm256_extract_epi32 (__m256i a, const int index); HELPER
+ /// __int32 _mm256_extract_epi32 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static uint Extract(Vector256<uint> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm256_extract_epi64 (__m256i a, const int index); HELPER
+ /// __int64 _mm256_extract_epi64 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static long Extract(Vector256<long> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm256_extract_epi64 (__m256i a, const int index); HELPER
+ /// __int64 _mm256_extract_epi64 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static ulong Extract(Vector256<ulong> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
- /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
/// </summary>
public static Vector128<T> ExtractVector128<T>(Vector256<T> value, byte index) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(byte* address, Vector256<byte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(sbyte* address, Vector256<sbyte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(short* address, Vector256<short> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ushort* address, Vector256<ushort> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(int* address, Vector256<int> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(uint* address, Vector256<uint> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(long* address, Vector256<long> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ulong* address, Vector256<ulong> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(float* address, Vector256<float> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(double* address, Vector256<double> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_castpd128_pd256 (__m128d a); HELPER - No Codegen
- /// __m256 _mm256_castps128_ps256 (__m128 a); HELPER - No Codegen
- /// __m256i _mm256_castsi128_si256 (__m128i a); HELPER - No Codegen
+ /// __m256d _mm256_castpd128_pd256 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_castps128_ps256 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castsi128_si256 (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<T> ExtendToVector256<T>(Vector128<T> value) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_floor_ps (__m256 a); VROUNDPS ymm, ymm/m256, imm8(9)
+ /// __m256 _mm256_floor_ps (__m256 a);
+ /// VROUNDPS ymm, ymm/m256, imm8(9)
/// </summary>
public static Vector256<float> Floor(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_floor_pd (__m256d a); VROUNDPS ymm, ymm/m256, imm8(9)
+ /// __m256d _mm256_floor_pd (__m256d a);
+ /// VROUNDPD ymm, ymm/m256, imm8(9)
/// </summary>
public static Vector256<double> Floor(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm256_castpd256_pd128 (__m256d a); HELPER - No Codegen
- /// __m128 _mm256_castps256_ps128 (__m256 a); HELPER - No Codegen
- /// __m128i _mm256_castsi256_si128 (__m256i a); HELPER - No Codegen
+ /// __m128d _mm256_castpd256_pd128 (__m256d a);
+ /// HELPER - No Codegen
+ /// __m128 _mm256_castps256_ps128 (__m256 a);
+ /// HELPER - No Codegen
+ /// __m128i _mm256_castsi256_si128 (__m256i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector128<T> GetLowerHalf<T>(Vector256<T> value) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_hadd_ps (__m256 a, __m256 b); VHADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_hadd_ps (__m256 a, __m256 b);
+ /// VHADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> HorizontalAdd(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_hadd_pd (__m256d a, __m256d b); VHADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_hadd_pd (__m256d a, __m256d b);
+ /// VHADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> HorizontalAdd(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_hsub_ps (__m256 a, __m256 b); VHSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_hsub_ps (__m256 a, __m256 b);
+ /// VHSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> HorizontalSubtract(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_hsub_pd (__m256d a, __m256d b); VHSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_hsub_pd (__m256d a, __m256d b);
+ /// VHSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> HorizontalSubtract(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<byte> Insert(Vector256<byte> value, byte data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<short> Insert(Vector256<short> value, short data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<ushort> Insert(Vector256<ushort> value, ushort data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<int> Insert(Vector256<int> value, int data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<uint> Insert(Vector256<uint> value, uint data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<long> Insert(Vector256<long> value, long data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<ulong> Insert(Vector256<ulong> value, ulong data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
- /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
/// </summary>
public static Vector256<T> Insert<T>(Vector256<T> value, Vector128<T> data, byte index) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<byte> Insert(Vector256<byte> value, byte* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<short> Insert(Vector256<short> value, short* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ushort> Insert(Vector256<ushort> value, ushort* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<int> Insert(Vector256<int> value, int* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<uint> Insert(Vector256<uint> value, uint* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<long> Insert(Vector256<long> value, long* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ulong> Insert(Vector256<ulong> value, ulong* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<float> Insert(Vector256<float> value, float* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<double> Insert(Vector256<double> value, double* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadVector256(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadVector256(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadVector256(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadVector256(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadVector256(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadVector256(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadVector256(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadVector256(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_loadu_ps (float const * mem_addr); VMOVUPS ymm, ymm/m256
+ /// __m256 _mm256_loadu_ps (float const * mem_addr);
+ /// VMOVUPS ymm, ymm/m256
/// </summary>
public static unsafe Vector256<float> LoadVector256(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_loadu_pd (double const * mem_addr); VMOVUPD ymm, ymm/m256
+ /// __m256d _mm256_loadu_pd (double const * mem_addr);
+ /// VMOVUPD ymm, ymm/m256
/// </summary>
public static unsafe Vector256<double> LoadVector256(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadAlignedVector256(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadAlignedVector256(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadAlignedVector256(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadAlignedVector256(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadAlignedVector256(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadAlignedVector256(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadAlignedVector256(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadAlignedVector256(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_load_ps (float const * mem_addr); VMOVAPS ymm, ymm/m256
+ /// __m256 _mm256_load_ps (float const * mem_addr);
+ /// VMOVAPS ymm, ymm/m256
/// </summary>
public static unsafe Vector256<float> LoadAlignedVector256(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_load_pd (double const * mem_addr); VMOVAPD ymm, ymm/m256
+ /// __m256d _mm256_load_pd (double const * mem_addr);
+ /// VMOVAPD ymm, ymm/m256
/// </summary>
public static unsafe Vector256<double> LoadAlignedVector256(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadDquVector256(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadDquVector256(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadDquVector256(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadDquVector256(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadDquVector256(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadDquVector256(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadDquVector256(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadDquVector256(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_maskload_ps (float const * mem_addr, __m128i mask); VMASKMOVPS xmm, xmm, m128
+ /// __m128 _mm_maskload_ps (float const * mem_addr, __m128i mask);
+ /// VMASKMOVPS xmm, xmm, m128
/// </summary>
public static unsafe Vector128<float> MaskLoad(float* address, Vector128<uint> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_maskload_pd (double const * mem_addr, __m128i mask); VMASKMOVPD xmm, xmm, m128
+ /// __m128d _mm_maskload_pd (double const * mem_addr, __m128i mask);
+ /// VMASKMOVPD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<double> MaskLoad(double* address, Vector128<ulong> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask); VMASKMOVPS ymm, ymm, m256
+ /// __m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask);
+ /// VMASKMOVPS ymm, ymm, m256
/// </summary>
public static unsafe Vector256<float> MaskLoad(float* address, Vector256<uint> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_maskload_pd (double const * mem_addr, __m256i mask); VMASKMOVPD ymm, ymm, m256
+ /// __m256d _mm256_maskload_pd (double const * mem_addr, __m256i mask);
+ /// VMASKMOVPD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<double> MaskLoad(double* address, Vector256<ulong> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a); VMASKMOVPS m128, xmm, xmm
+ /// void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a);
+ /// VMASKMOVPS m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(float* address, Vector128<float> mask, Vector128<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_pd (double * mem_addr, __m128i mask, __m128d a); VMASKMOVPD m128, xmm, xmm
+ /// void _mm_maskstore_pd (double * mem_addr, __m128i mask, __m128d a);
+ /// VMASKMOVPD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(double* address, Vector128<double> mask, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a); VMASKMOVPS m256, ymm, ymm
+ /// void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a);
+ /// VMASKMOVPS m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(float* address, Vector256<float> mask, Vector256<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_pd (double * mem_addr, __m256i mask, __m256d a); VMASKMOVPD m256, ymm, ymm
+ /// void _mm256_maskstore_pd (double * mem_addr, __m256i mask, __m256d a);
+ /// VMASKMOVPD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(double* address, Vector256<double> mask, Vector256<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_max_ps (__m256 a, __m256 b); VMAXPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_max_ps (__m256 a, __m256 b);
+ /// VMAXPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Max(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_max_pd (__m256d a, __m256d b); VMAXPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_max_pd (__m256d a, __m256d b);
+ /// VMAXPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Max(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_min_ps (__m256 a, __m256 b); VMINPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_min_ps (__m256 a, __m256 b);
+ /// VMINPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Min(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_min_pd (__m256d a, __m256d b); VMINPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_min_pd (__m256d a, __m256d b);
+ /// VMINPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Min(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_movemask_ps (__m256 a); VMOVMSKPS reg, ymm
+ /// int _mm256_movemask_ps (__m256 a);
+ /// VMOVMSKPS reg, ymm
/// </summary>
public static int MoveMask(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_movemask_pd (__m256d a); VMOVMSKPD reg, ymm
+ /// int _mm256_movemask_pd (__m256d a);
+ /// VMOVMSKPD reg, ymm
/// </summary>
public static int MoveMask(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_mul_ps (__m256 a, __m256 b); VMULPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_mul_ps (__m256 a, __m256 b);
+ /// VMULPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Multiply(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_mul_pd (__m256d a, __m256d b); VMULPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_mul_pd (__m256d a, __m256d b);
+ /// VMULPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Multiply(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_or_ps (__m256 a, __m256 b); VORPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_or_ps (__m256 a, __m256 b);
+ /// VORPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Or(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_or_pd (__m256d a, __m256d b); VORPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_or_pd (__m256d a, __m256d b);
+ /// VORPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Or(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_permute_ps (__m128 a, int imm8); VPERMILPS xmm, xmm, imm8
+ /// __m128 _mm_permute_ps (__m128 a, int imm8);
+ /// VPERMILPS xmm, xmm, imm8
/// </summary>
public static Vector128<float> Permute(Vector128<float> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_permute_pd (__m128d a, int imm8); VPERMILPD xmm, xmm, imm8
+ /// __m128d _mm_permute_pd (__m128d a, int imm8);
+ /// VPERMILPD xmm, xmm, imm8
/// </summary>
public static Vector128<double> Permute(Vector128<double> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_permute_ps (__m256 a, int imm8); VPERMILPS ymm, ymm, imm8
+ /// __m256 _mm256_permute_ps (__m256 a, int imm8);
+ /// VPERMILPS ymm, ymm, imm8
/// </summary>
public static Vector256<float> Permute(Vector256<float> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_permute_pd (__m256d a, int imm8); VPERMILPD ymm, ymm, imm8
+ /// __m256d _mm256_permute_pd (__m256d a, int imm8);
+ /// VPERMILPD ymm, ymm, imm8
/// </summary>
public static Vector256<double> Permute(Vector256<double> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
- /// __m256d _mm256_permute2f128_pd (__m256d a, __m256d b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
- /// __m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_permute2f128_pd (__m256d a, __m256d b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<T> Permute2x128<T>(Vector256<T> left, Vector256<T> right, byte control) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_permutevar_ps (__m128 a, __m128i b); VPERMILPS xmm, xmm, xmm/m128
+ /// __m128 _mm_permutevar_ps (__m128 a, __m128i b);
+ /// VPERMILPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> PermuteVar(Vector128<float> left, Vector128<float> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_permutevar_pd (__m128d a, __m128i b); VPERMILPD xmm, xmm, xmm/m128
+ /// __m128d _mm_permutevar_pd (__m128d a, __m128i b);
+ /// VPERMILPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> PermuteVar(Vector128<double> left, Vector128<double> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_permutevar_ps (__m256 a, __m256i b); VPERMILPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_permutevar_ps (__m256 a, __m256i b);
+ /// VPERMILPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> PermuteVar(Vector256<float> left, Vector256<float> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_permutevar_pd (__m256d a, __m256i b); VPERMILPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_permutevar_pd (__m256d a, __m256i b);
+ /// VPERMILPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> PermuteVar(Vector256<double> left, Vector256<double> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_rcp_ps (__m256 a); VRCPPS ymm, ymm/m256
+ /// __m256 _mm256_rcp_ps (__m256 a);
+ /// VRCPPS ymm, ymm/m256
/// </summary>
public static Vector256<float> Reciprocal(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_rsqrt_ps (__m256 a); VRSQRTPS ymm, ymm/m256
+ /// __m256 _mm256_rsqrt_ps (__m256 a);
+ /// VRSQRTPS ymm, ymm/m256
/// </summary>
public static Vector256<float> ReciprocalSqrt(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_round_ps (__m256 a, int rounding); VROUNDPS ymm, ymm/m256, imm8(8)
+ /// __m256 _mm256_round_ps (__m256 a, int rounding);
+ /// VROUNDPS ymm, ymm/m256, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector256<float> RoundToNearestInteger(Vector256<float> value) { throw new PlatformNotSupportedException(); }
@@ -668,7 +822,8 @@ namespace System.Runtime.Intrinsics.X86
public static Vector256<float> RoundCurrentDirection(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_round_pd (__m256d a, int rounding); VROUNDPD ymm, ymm/m256, imm8(8)
+ /// __m256d _mm256_round_pd (__m256d a, int rounding);
+ /// VROUNDPD ymm, ymm/m256, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector256<double> RoundToNearestInteger(Vector256<double> value) { throw new PlatformNotSupportedException(); }
@@ -690,318 +845,408 @@ namespace System.Runtime.Intrinsics.X86
public static Vector256<double> RoundCurrentDirection(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector256<sbyte> SetVector256(sbyte e31, sbyte e30, sbyte e29, sbyte e28, sbyte e27, sbyte e26, sbyte e25, sbyte e24, sbyte e23, sbyte e22, sbyte e21, sbyte e20, sbyte e19, sbyte e18, sbyte e17, sbyte e16, sbyte e15, sbyte e14, sbyte e13, sbyte e12, sbyte e11, sbyte e10, sbyte e9, sbyte e8, sbyte e7, sbyte e6, sbyte e5, sbyte e4, sbyte e3, sbyte e2, sbyte e1, sbyte e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector256<byte> SetVector256(byte e31, byte e30, byte e29, byte e28, byte e27, byte e26, byte e25, byte e24, byte e23, byte e22, byte e21, byte e20, byte e19, byte e18, byte e17, byte e16, byte e15, byte e14, byte e13, byte e12, byte e11, byte e10, byte e9, byte e8, byte e7, byte e6, byte e5, byte e4, byte e3, byte e2, byte e1, byte e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector256<short> SetVector256(short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector256<ushort> SetVector256(ushort e15, ushort e14, ushort e13, ushort e12, ushort e11, ushort e10, ushort e9, ushort e8, ushort e7, ushort e6, ushort e5, ushort e4, ushort e3, ushort e2, ushort e1, ushort e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0); HELPER
+ /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector256<int> SetVector256(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0); HELPER
+ /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector256<uint> SetVector256(uint e7, uint e6, uint e5, uint e4, uint e3, uint e2, uint e1, uint e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0); HELPER
+ /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector256<long> SetVector256(long e3, long e2, long e1, long e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0); HELPER
+ /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector256<ulong> SetVector256(ulong e3, ulong e2, ulong e1, ulong e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0); HELPER
+ /// __m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0);
+ /// HELPER
/// </summary>
public static Vector256<float> SetVector256(float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_set_pd (double e3, double e2, double e1, double e0); HELPER
+ /// __m256d _mm256_set_pd (double e3, double e2, double e1, double e0);
+ /// HELPER
/// </summary>
public static Vector256<double> SetVector256(double e3, double e2, double e1, double e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_set1_epi8 (char a); HELPER
- /// __m256i _mm256_set1_epi16 (short a); HELPER
- /// __m256i _mm256_set1_epi32 (int a); HELPER
- /// __m256i _mm256_set1_epi64x (long long a); HELPER
- /// __m256 _mm256_set1_ps (float a); HELPER
- /// __m256d _mm256_set1_pd (double a); HELPER
+ /// __m256i _mm256_set1_epi8 (char a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi16 (short a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi32 (int a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi64x (long long a);
+ /// HELPER
+ /// __m256 _mm256_set1_ps (float a);
+ /// HELPER
+ /// __m256d _mm256_set1_pd (double a);
+ /// HELPER
/// </summary>
public static Vector256<T> SetAllVector256<T>(T value) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_set_m128 (__m128 hi, __m128 lo); HELPER
- /// __m256d _mm256_set_m128d (__m128d hi, __m128d lo); HELPER
- /// __m256i _mm256_set_m128i (__m128i hi, __m128i lo); HELPER
+ /// __m256 _mm256_set_m128 (__m128 hi, __m128 lo);
+ /// HELPER
+ /// __m256d _mm256_set_m128d (__m128d hi, __m128d lo);
+ /// HELPER
+ /// __m256i _mm256_set_m128i (__m128i hi, __m128i lo);
+ /// HELPER
/// </summary>
public static Vector256<T> SetHighLow<T>(Vector128<T> hi, Vector128<T> lo) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_setzero_si256 (void); HELPER
- /// __m256 _mm256_setzero_ps (void); HELPER
- /// __m256d _mm256_setzero_pd (void); HELPER
+ /// __m256i _mm256_setzero_si256 (void);
+ /// HELPER
+ /// __m256 _mm256_setzero_ps (void);
+ /// HELPER
+ /// __m256d _mm256_setzero_pd (void);
+ /// HELPER
/// </summary>
public static Vector256<T> SetZeroVector256<T>() where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_shuffle_ps (__m256 a, __m256 b, const int imm8); VSHUFPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_shuffle_ps (__m256 a, __m256 b, const int imm8);
+ /// VSHUFPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Shuffle(Vector256<float> value, Vector256<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_shuffle_pd (__m256d a, __m256d b, const int imm8); VSHUFPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_shuffle_pd (__m256d a, __m256d b, const int imm8);
+ /// VSHUFPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Shuffle(Vector256<double> value, Vector256<double> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_sqrt_ps (__m256 a); VSQRTPS ymm, ymm/m256
+ /// __m256 _mm256_sqrt_ps (__m256 a);
+ /// VSQRTPS ymm, ymm/m256
/// </summary>
public static Vector256<float> Sqrt(Vector256<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_sqrt_pd (__m256d a); VSQRTPD ymm, ymm/m256
+ /// __m256d _mm256_sqrt_pd (__m256d a);
+ /// VSQRTPD ymm, ymm/m256
/// </summary>
public static Vector256<double> Sqrt(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_castpd_ps (__m256d a); HELPER - No Codegen
- /// __m256i _mm256_castpd_si256 (__m256d a); HELPER - No Codegen
- /// __m256d _mm256_castps_pd (__m256 a); HELPER - No Codegen
- /// __m256i _mm256_castps_si256 (__m256 a); HELPER - No Codegen
- /// __m256d _mm256_castsi256_pd (__m256i a); HELPER - No Codegen
- /// __m256 _mm256_castsi256_ps (__m256i a); HELPER - No Codegen
+ /// __m256 _mm256_castpd_ps (__m256d a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castpd_si256 (__m256d a);
+ /// HELPER - No Codegen
+ /// __m256d _mm256_castps_pd (__m256 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castps_si256 (__m256 a);
+ /// HELPER - No Codegen
+ /// __m256d _mm256_castsi256_pd (__m256i a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_castsi256_ps (__m256i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<U> StaticCast<T, U>(Vector256<T> value) where T : struct where U : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(sbyte* address, Vector256<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(byte* address, Vector256<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(short* address, Vector256<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(ushort* address, Vector256<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(int* address, Vector256<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(uint* address, Vector256<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(long* address, Vector256<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(ulong* address, Vector256<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_ps (float * mem_addr, __m256 a); VMOVAPS m256, ymm
+ /// void _mm256_store_ps (float * mem_addr, __m256 a);
+ /// VMOVAPS m256, ymm
/// </summary>
public static unsafe void StoreAligned(float* address, Vector256<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_store_pd (double * mem_addr, __m256d a); VMOVAPD m256, ymm
+ /// void _mm256_store_pd (double * mem_addr, __m256d a);
+ /// VMOVAPD m256, ymm
/// </summary>
public static unsafe void StoreAligned(double* address, Vector256<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(sbyte* address, Vector256<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(byte* address, Vector256<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(short* address, Vector256<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ushort* address, Vector256<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(int* address, Vector256<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(uint* address, Vector256<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(long* address, Vector256<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ulong* address, Vector256<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_ps (float * mem_addr, __m256 a); MOVNTPS m256, ymm
+ /// void _mm256_stream_ps (float * mem_addr, __m256 a);
+ /// MOVNTPS m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(float* address, Vector256<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_stream_pd (double * mem_addr, __m256d a); MOVNTPD m256, ymm
+ /// void _mm256_stream_pd (double * mem_addr, __m256d a);
+ /// MOVNTPD m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(double* address, Vector256<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(sbyte* address, Vector256<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(byte* address, Vector256<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(short* address, Vector256<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(ushort* address, Vector256<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(int* address, Vector256<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(uint* address, Vector256<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(long* address, Vector256<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(ulong* address, Vector256<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_ps (float * mem_addr, __m256 a); MOVUPS m256, ymm
+ /// void _mm256_storeu_ps (float * mem_addr, __m256 a);
+ /// MOVUPS m256, ymm
/// </summary>
public static unsafe void Store(float* address, Vector256<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_storeu_pd (double * mem_addr, __m256d a); MOVUPD m256, ymm
+ /// void _mm256_storeu_pd (double * mem_addr, __m256d a);
+ /// MOVUPD m256, ymm
/// </summary>
public static unsafe void Store(double* address, Vector256<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_sub_ps (__m256 a, __m256 b); VSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_sub_ps (__m256 a, __m256 b);
+ /// VSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Subtract(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_sub_pd (__m256d a, __m256d b); VSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_sub_pd (__m256d a, __m256d b);
+ /// VSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Subtract(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testc_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testc_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testc_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testc_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_testc_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testc_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testc_pd (__m256d a, __m256d b); VTESTPS ymm, ymm/m256
+ /// int _mm256_testc_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testc_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testc_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestC<T>(Vector256<T> left, Vector256<T> right) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testnzc_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testnzc_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testnzc_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testnzc_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_testnzc_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testnzc_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testnzc_pd (__m256d a, __m256d b); VTESTPD ymm, ymm/m256
+ /// int _mm256_testnzc_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testnzc_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testnzc_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestNotZAndNotC<T>(Vector256<T> left, Vector256<T> right) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testz_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testz_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testz_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testz_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_testz_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testz_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testz_pd (__m256d a, __m256d b); VTESTPD ymm, ymm/m256
+ /// int _mm256_testz_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testz_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testz_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestZ<T>(Vector256<T> left, Vector256<T> right) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_unpackhi_ps (__m256 a, __m256 b); VUNPCKHPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_unpackhi_ps (__m256 a, __m256 b);
+ /// VUNPCKHPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> UnpackHigh(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_unpackhi_pd (__m256d a, __m256d b); VUNPCKHPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_unpackhi_pd (__m256d a, __m256d b);
+ /// VUNPCKHPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> UnpackHigh(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_unpacklo_ps (__m256 a, __m256 b); VUNPCKLPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_unpacklo_ps (__m256 a, __m256 b);
+ /// VUNPCKLPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> UnpackLow(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_unpacklo_pd (__m256d a, __m256d b); VUNPCKLPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_unpacklo_pd (__m256d a, __m256d b);
+ /// VUNPCKLPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> UnpackLow(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_xor_ps (__m256 a, __m256 b); VXORPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_xor_ps (__m256 a, __m256 b);
+ /// VXORPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Xor(Vector256<float> left, Vector256<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_xor_pd (__m256d a, __m256d b); VXORPS ymm, ymm, ymm/m256
+ /// __m256d _mm256_xor_pd (__m256d a, __m256d b);
+ /// VXORPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Xor(Vector256<double> left, Vector256<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_zeroall (void); VZEROALL
+ /// void _mm256_zeroall (void);
+ /// VZEROALL
/// </summary>
public static void ZeroAll() { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_zeroupper (void); VZEROUPPER
+ /// void _mm256_zeroupper (void);
+ /// VZEROUPPER
/// </summary>
public static void ZeroUpper() { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_zextpd128_pd256 (__m128d a); HELPER - No Codegen
- /// __m256 _mm256_zextps128_ps256 (__m128 a); HELPER - No Codegen
- /// __m256i _mm256_zextsi128_si256 (__m128i a); HELPER - No Codegen
+ /// __m256d _mm256_zextpd128_pd256 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_zextps128_ps256 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_zextsi128_si256 (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<T> ZeroExtendToVector256<T>(Vector128<T> value) where T : struct { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.cs
index 998d17dd13..148989d9e6 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx.cs
@@ -16,221 +16,272 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m256 _mm256_add_ps (__m256 a, __m256 b); VADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_add_ps (__m256 a, __m256 b);
+ /// VADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Add(Vector256<float> left, Vector256<float> right) => Add(left, right);
/// <summary>
- /// __m256d _mm256_add_pd (__m256d a, __m256d b); VADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_add_pd (__m256d a, __m256d b);
+ /// VADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Add(Vector256<double> left, Vector256<double> right) => Add(left, right);
/// <summary>
- /// __m256 _mm256_addsub_ps (__m256 a, __m256 b); VADDSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_addsub_ps (__m256 a, __m256 b);
+ /// VADDSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> AddSubtract(Vector256<float> left, Vector256<float> right) => AddSubtract(left, right);
/// <summary>
- /// __m256d _mm256_addsub_pd (__m256d a, __m256d b); VADDSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_addsub_pd (__m256d a, __m256d b);
+ /// VADDSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> AddSubtract(Vector256<double> left, Vector256<double> right) => AddSubtract(left, right);
/// <summary>
- /// __m256 _mm256_and_ps (__m256 a, __m256 b); VANDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_and_ps (__m256 a, __m256 b);
+ /// VANDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> And(Vector256<float> left, Vector256<float> right) => And(left, right);
/// <summary>
- /// __m256d _mm256_and_pd (__m256d a, __m256d b); VANDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_and_pd (__m256d a, __m256d b);
+ /// VANDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> And(Vector256<double> left, Vector256<double> right) => And(left, right);
/// <summary>
- /// __m256 _mm256_andnot_ps (__m256 a, __m256 b); VANDNPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_andnot_ps (__m256 a, __m256 b);
+ /// VANDNPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> AndNot(Vector256<float> left, Vector256<float> right) => AndNot(left, right);
/// <summary>
- /// __m256d _mm256_andnot_pd (__m256d a, __m256d b); VANDNPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_andnot_pd (__m256d a, __m256d b);
+ /// VANDNPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> AndNot(Vector256<double> left, Vector256<double> right) => AndNot(left, right);
/// <summary>
- /// __m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8); VBLENDPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8);
+ /// VBLENDPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Blend(Vector256<float> left, Vector256<float> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256d _mm256_blend_pd (__m256d a, __m256d b, const int imm8); VBLENDPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_blend_pd (__m256d a, __m256d b, const int imm8);
+ /// VBLENDPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Blend(Vector256<double> left, Vector256<double> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256 _mm256_blendv_ps (__m256 a, __m256 b, __m256 mask); VBLENDVPS ymm, ymm, ymm/m256, ymm
+ /// __m256 _mm256_blendv_ps (__m256 a, __m256 b, __m256 mask);
+ /// VBLENDVPS ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<float> BlendVariable(Vector256<float> left, Vector256<float> right, Vector256<float> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask); VBLENDVPD ymm, ymm, ymm/m256, ymm
+ /// __m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask);
+ /// VBLENDVPD ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<double> BlendVariable(Vector256<double> left, Vector256<double> right, Vector256<double> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128 _mm_broadcast_ss (float const * mem_addr); VBROADCASTSS xmm, m32
+ /// __m128 _mm_broadcast_ss (float const * mem_addr);
+ /// VBROADCASTSS xmm, m32
/// </summary>
public static Vector128<float> BroadcastElementToVector128(float* source) => BroadcastElementToVector128(source);
/// <summary>
- /// __m256 _mm256_broadcast_ss (float const * mem_addr); VBROADCASTSS ymm, m32
+ /// __m256 _mm256_broadcast_ss (float const * mem_addr);
+ /// VBROADCASTSS ymm, m32
/// </summary>
public static Vector256<float> BroadcastElementToVector256(float* source) => BroadcastElementToVector256(source);
/// <summary>
- /// __m256d _mm256_broadcast_sd (double const * mem_addr); VBROADCASTSD ymm, m64
+ /// __m256d _mm256_broadcast_sd (double const * mem_addr);
+ /// VBROADCASTSD ymm, m64
/// </summary>
public static Vector256<double> BroadcastElementToVector256(double* source) => BroadcastElementToVector256(source);
/// <summary>
- /// __m256 _mm256_broadcast_ps (__m128 const * mem_addr); VBROADCASTF128, ymm, m128
+ /// __m256 _mm256_broadcast_ps (__m128 const * mem_addr);
+ /// VBROADCASTF128 ymm, m128
/// </summary>
public static unsafe Vector256<float> BroadcastVector128ToVector256(float* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256d _mm256_broadcast_pd (__m128d const * mem_addr); VBROADCASTF128, ymm, m128
+ /// __m256d _mm256_broadcast_pd (__m128d const * mem_addr);
+ /// VBROADCASTF128 ymm, m128
/// </summary>
public static unsafe Vector256<double> BroadcastVector128ToVector256(double* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256 _mm256_ceil_ps (__m256 a); VROUNDPS ymm, ymm/m256, imm8(10)
+ /// __m256 _mm256_ceil_ps (__m256 a);
+ /// VROUNDPS ymm, ymm/m256, imm8(10)
/// </summary>
public static Vector256<float> Ceiling(Vector256<float> value) => Ceiling(value);
/// <summary>
- /// __m256d _mm256_ceil_pd (__m256d a); VROUNDPD ymm, ymm/m256, imm8(10)
+ /// __m256d _mm256_ceil_pd (__m256d a);
+ /// VROUNDPD ymm, ymm/m256, imm8(10)
/// </summary>
public static Vector256<double> Ceiling(Vector256<double> value) => Ceiling(value);
/// <summary>
- /// __m128 _mm_cmp_ps (__m128 a, __m128 b, const int imm8); VCMPPS xmm, xmm, xmm/m128, imm8
+ /// __m128 _mm_cmp_ps (__m128 a, __m128 b, const int imm8);
+ /// VCMPPS xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Compare(Vector128<float> left, Vector128<float> right, FloatComparisonMode mode) => Compare(left, right, mode);
/// <summary>
- /// __m128d _mm_cmp_pd (__m128d a, __m128d b, const int imm8); VCMPPD xmm, xmm, xmm/m128, imm8
+ /// __m128d _mm_cmp_pd (__m128d a, __m128d b, const int imm8);
+ /// VCMPPD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Compare(Vector128<double> left, Vector128<double> right, FloatComparisonMode mode) => Compare(left, right, mode);
/// <summary>
- /// __m256 _mm256_cmp_ps (__m256 a, __m256 b, const int imm8); VCMPPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_cmp_ps (__m256 a, __m256 b, const int imm8);
+ /// VCMPPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Compare(Vector256<float> left, Vector256<float> right, FloatComparisonMode mode) => Compare(left, right, mode);
/// <summary>
- /// __m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8); VCMPPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8);
+ /// VCMPPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Compare(Vector256<double> left, Vector256<double> right, FloatComparisonMode mode) => Compare(left, right, mode);
/// <summary>
- /// __m128d _mm_cmp_sd (__m128d a, __m128d b, const int imm8); VCMPSS xmm, xmm, xmm/m32, imm8
+ /// __m128d _mm_cmp_sd (__m128d a, __m128d b, const int imm8);
+ /// VCMPSD xmm, xmm, xmm/m64, imm8
/// </summary>
public static Vector128<double> CompareScalar(Vector128<double> left, Vector128<double> right, FloatComparisonMode mode) => CompareScalar(left, right, mode);
/// <summary>
- /// __m128 _mm_cmp_ss (__m128 a, __m128 b, const int imm8); VCMPSD xmm, xmm, xmm/m64, imm8
+ /// __m128 _mm_cmp_ss (__m128 a, __m128 b, const int imm8);
+ /// VCMPSS xmm, xmm, xmm/m32, imm8
/// </summary>
public static Vector128<float> CompareScalar(Vector128<float> left, Vector128<float> right, FloatComparisonMode mode) => CompareScalar(left, right, mode);
/// <summary>
- /// float _mm256_cvtss_f32 (__m256 a); HELPER: VMOVSS
+ /// float _mm256_cvtss_f32 (__m256 a);
+ /// HELPER: VMOVSS
/// </summary>
public static float ConvertToSingle(Vector256<float> value) => ConvertToSingle(value);
/// <summary>
- /// __m128i _mm256_cvtpd_epi32 (__m256d a); VCVTPD2DQ xmm, ymm/m256
+ /// __m128i _mm256_cvtpd_epi32 (__m256d a);
+ /// VCVTPD2DQ xmm, ymm/m256
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector256<double> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128 _mm256_cvtpd_ps (__m256d a); VCVTPD2PS xmm, ymm/m256
+ /// __m128 _mm256_cvtpd_ps (__m256d a);
+ /// VCVTPD2PS xmm, ymm/m256
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector256<double> value) => ConvertToVector128Single(value);
/// <summary>
- /// __m256i _mm256_cvtps_epi32 (__m256 a); VCVTPS2DQ ymm, ymm/m256
+ /// __m256i _mm256_cvtps_epi32 (__m256 a);
+ /// VCVTPS2DQ ymm, ymm/m256
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector256<float> value) => ConvertToVector256Int32(value);
/// <summary>
- /// __m256 _mm256_cvtepi32_ps (__m256i a); VCVTDQ2PS ymm, ymm/m256
+ /// __m256 _mm256_cvtepi32_ps (__m256i a);
+ /// VCVTDQ2PS ymm, ymm/m256
/// </summary>
public static Vector256<float> ConvertToVector256Single(Vector256<int> value) => ConvertToVector256Single(value);
/// <summary>
- /// __m256d _mm256_cvtps_pd (__m128 a); VCVTPS2PD ymm, xmm/m128
+ /// __m256d _mm256_cvtps_pd (__m128 a);
+ /// VCVTPS2PD ymm, xmm/m128
/// </summary>
public static Vector256<double> ConvertToVector256Double(Vector128<float> value) => ConvertToVector256Double(value);
/// <summary>
- /// __m256d _mm256_cvtepi32_pd (__m128i a); VCVTDQ2PD ymm, xmm/m128
+ /// __m256d _mm256_cvtepi32_pd (__m128i a);
+ /// VCVTDQ2PD ymm, xmm/m128
/// </summary>
public static Vector256<double> ConvertToVector256Double(Vector128<int> value) => ConvertToVector256Double(value);
/// <summary>
- /// __m128i _mm256_cvttpd_epi32 (__m256d a); VCVTTPD2DQ xmm, ymm/m256
+ /// __m128i _mm256_cvttpd_epi32 (__m256d a);
+ /// VCVTTPD2DQ xmm, ymm/m256
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector256<double> value) => ConvertToVector128Int32WithTruncation(value);
/// <summary>
- /// __m256i _mm256_cvttps_epi32 (__m256 a); VCVTTPS2DQ ymm, ymm/m256
+ /// __m256i _mm256_cvttps_epi32 (__m256 a);
+ /// VCVTTPS2DQ ymm, ymm/m256
/// </summary>
public static Vector256<int> ConvertToVector256Int32WithTruncation(Vector256<float> value) => ConvertToVector256Int32WithTruncation(value);
/// <summary>
- /// __m256 _mm256_div_ps (__m256 a, __m256 b); VDIVPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_div_ps (__m256 a, __m256 b);
+ /// VDIVPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Divide(Vector256<float> left, Vector256<float> right) => Divide(left, right);
/// <summary>
- /// __m256d _mm256_div_pd (__m256d a, __m256d b); VDIVPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_div_pd (__m256d a, __m256d b);
+ /// VDIVPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Divide(Vector256<double> left, Vector256<double> right) => Divide(left, right);
/// <summary>
- /// __m256 _mm256_dp_ps (__m256 a, __m256 b, const int imm8); VDPPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_dp_ps (__m256 a, __m256 b, const int imm8);
+ /// VDPPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> DotProduct(Vector256<float> left, Vector256<float> right, byte control) => DotProduct(left, right, control);
/// <summary>
- /// __m256 _mm256_moveldup_ps (__m256 a); VMOVSLDUP ymm, ymm/m256
+ /// __m256 _mm256_moveldup_ps (__m256 a);
+ /// VMOVSLDUP ymm, ymm/m256
/// </summary>
public static Vector256<float> DuplicateEvenIndexed(Vector256<float> value) => DuplicateEvenIndexed(value);
/// <summary>
- /// __m256d _mm256_movedup_pd (__m256d a); VMOVDDUP ymm, ymm/m256
+ /// __m256d _mm256_movedup_pd (__m256d a);
+ /// VMOVDDUP ymm, ymm/m256
/// </summary>
public static Vector256<double> DuplicateEvenIndexed(Vector256<double> value) => DuplicateEvenIndexed(value);
/// <summary>
- /// __m256 _mm256_movehdup_ps (__m256 a); VMOVSHDUP ymm, ymm/m256
+ /// __m256 _mm256_movehdup_ps (__m256 a);
+ /// VMOVSHDUP ymm, ymm/m256
/// </summary>
public static Vector256<float> DuplicateOddIndexed(Vector256<float> value) => DuplicateOddIndexed(value);
/// <summary>
- /// __int8 _mm256_extract_epi8 (__m256i a, const int index); HELPER
+ /// __int8 _mm256_extract_epi8 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static sbyte Extract(Vector256<sbyte> value, byte index) => Extract(value, index);
/// <summary>
- /// __int8 _mm256_extract_epi8 (__m256i a, const int index); HELPER
+ /// __int8 _mm256_extract_epi8 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static byte Extract(Vector256<byte> value, byte index) => Extract(value, index);
/// <summary>
- /// __int16 _mm256_extract_epi16 (__m256i a, const int index); HELPER
+ /// __int16 _mm256_extract_epi16 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static short Extract(Vector256<short> value, byte index) => Extract(value, index);
/// <summary>
- /// __int16 _mm256_extract_epi16 (__m256i a, const int index); HELPER
+ /// __int16 _mm256_extract_epi16 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static ushort Extract(Vector256<ushort> value, byte index) => Extract(value, index);
/// <summary>
- /// __int32 _mm256_extract_epi32 (__m256i a, const int index); HELPER
+ /// __int32 _mm256_extract_epi32 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static int Extract(Vector256<int> value, byte index) => Extract(value, index);
/// <summary>
- /// __int32 _mm256_extract_epi32 (__m256i a, const int index); HELPER
+ /// __int32 _mm256_extract_epi32 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static uint Extract(Vector256<uint> value, byte index) => Extract(value, index);
/// <summary>
- /// __int64 _mm256_extract_epi64 (__m256i a, const int index); HELPER
+ /// __int64 _mm256_extract_epi64 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static long Extract(Vector256<long> value, byte index) => Extract(value, index);
/// <summary>
- /// __int64 _mm256_extract_epi64 (__m256i a, const int index); HELPER
+ /// __int64 _mm256_extract_epi64 (__m256i a, const int index);
+ /// HELPER
/// </summary>
public static ulong Extract(Vector256<ulong> value, byte index) => Extract(value, index);
/// <summary>
- /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
- /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 xmm/m128, ymm, imm8
/// </summary>
public static Vector128<T> ExtractVector128<T>(Vector256<T> value, byte index) where T : struct
{
@@ -239,50 +290,63 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(byte* address, Vector256<byte> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(sbyte* address, Vector256<sbyte> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(short* address, Vector256<short> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ushort* address, Vector256<ushort> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(int* address, Vector256<int> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(uint* address, Vector256<uint> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(long* address, Vector256<long> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128i _mm256_extractf128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ulong* address, Vector256<ulong> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128 _mm256_extractf128_ps (__m256 a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(float* address, Vector256<float> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8); VEXTRACTF128 m128, ymm, imm8
+ /// __m128d _mm256_extractf128_pd (__m256d a, const int imm8);
+ /// VEXTRACTF128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(double* address, Vector256<double> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m256d _mm256_castpd128_pd256 (__m128d a); HELPER - No Codegen
- /// __m256 _mm256_castps128_ps256 (__m128 a); HELPER - No Codegen
- /// __m256i _mm256_castsi128_si256 (__m128i a); HELPER - No Codegen
+ /// __m256d _mm256_castpd128_pd256 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_castps128_ps256 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castsi128_si256 (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<T> ExtendToVector256<T>(Vector128<T> value) where T : struct
{
@@ -291,18 +355,23 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256 _mm256_floor_ps (__m256 a); VROUNDPS ymm, ymm/m256, imm8(9)
+ /// __m256 _mm256_floor_ps (__m256 a);
+ /// VROUNDPS ymm, ymm/m256, imm8(9)
/// </summary>
public static Vector256<float> Floor(Vector256<float> value) => Floor(value);
/// <summary>
- /// __m256d _mm256_floor_pd (__m256d a); VROUNDPS ymm, ymm/m256, imm8(9)
+ /// __m256d _mm256_floor_pd (__m256d a);
+ /// VROUNDPD ymm, ymm/m256, imm8(9)
/// </summary>
public static Vector256<double> Floor(Vector256<double> value) => Floor(value);
/// <summary>
- /// __m128d _mm256_castpd256_pd128 (__m256d a); HELPER - No Codegen
- /// __m128 _mm256_castps256_ps128 (__m256 a); HELPER - No Codegen
- /// __m128i _mm256_castsi256_si128 (__m256i a); HELPER - No Codegen
+ /// __m128d _mm256_castpd256_pd128 (__m256d a);
+ /// HELPER - No Codegen
+ /// __m128 _mm256_castps256_ps128 (__m256 a);
+ /// HELPER - No Codegen
+ /// __m128i _mm256_castsi256_si128 (__m256i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector128<T> GetLowerHalf<T>(Vector256<T> value) where T : struct
{
@@ -311,60 +380,75 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256 _mm256_hadd_ps (__m256 a, __m256 b); VHADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_hadd_ps (__m256 a, __m256 b);
+ /// VHADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> HorizontalAdd(Vector256<float> left, Vector256<float> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m256d _mm256_hadd_pd (__m256d a, __m256d b); VHADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_hadd_pd (__m256d a, __m256d b);
+ /// VHADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> HorizontalAdd(Vector256<double> left, Vector256<double> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m256 _mm256_hsub_ps (__m256 a, __m256 b); VHSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_hsub_ps (__m256 a, __m256 b);
+ /// VHSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> HorizontalSubtract(Vector256<float> left, Vector256<float> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m256d _mm256_hsub_pd (__m256d a, __m256d b); VHSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_hsub_pd (__m256d a, __m256d b);
+ /// VHSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> HorizontalSubtract(Vector256<double> left, Vector256<double> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi8 (__m256i a, __int8 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<byte> Insert(Vector256<byte> value, byte data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<short> Insert(Vector256<short> value, short data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi16 (__m256i a, __int16 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<ushort> Insert(Vector256<ushort> value, ushort data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<int> Insert(Vector256<int> value, int data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi32 (__m256i a, __int32 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<uint> Insert(Vector256<uint> value, uint data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<long> Insert(Vector256<long> value, long data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index); HELPER
+ /// __m256i _mm256_insert_epi64 (__m256i a, __int64 i, const int index);
+ /// HELPER
/// </summary>
public static Vector256<ulong> Insert(Vector256<ulong> value, ulong data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
- /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, xmm/m128, imm8
/// </summary>
public static Vector256<T> Insert<T>(Vector256<T> value, Vector128<T> data, byte index) where T : struct
{
@@ -373,264 +457,327 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<byte> Insert(Vector256<byte> value, byte* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<short> Insert(Vector256<short> value, short* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ushort> Insert(Vector256<ushort> value, ushort* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<int> Insert(Vector256<int> value, int* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<uint> Insert(Vector256<uint> value, uint* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<long> Insert(Vector256<long> value, long* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ulong> Insert(Vector256<ulong> value, ulong* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<float> Insert(Vector256<float> value, float* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8); VINSERTF128 ymm, ymm, m128, imm8
+ /// __m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8);
+ /// VINSERTF128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<double> Insert(Vector256<double> value, double* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadVector256(sbyte* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadVector256(byte* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadVector256(short* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadVector256(ushort* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadVector256(int* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadVector256(uint* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadVector256(long* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr); VMOVDQU ymm, m256
+ /// __m256i _mm256_loadu_si256 (__m256i const * mem_addr);
+ /// VMOVDQU ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadVector256(ulong* address) => LoadVector256(address);
/// <summary>
- /// __m256 _mm256_loadu_ps (float const * mem_addr); VMOVUPS ymm, ymm/m256
+ /// __m256 _mm256_loadu_ps (float const * mem_addr);
+ /// VMOVUPS ymm, ymm/m256
/// </summary>
public static unsafe Vector256<float> LoadVector256(float* address) => LoadVector256(address);
/// <summary>
- /// __m256d _mm256_loadu_pd (double const * mem_addr); VMOVUPD ymm, ymm/m256
+ /// __m256d _mm256_loadu_pd (double const * mem_addr);
+ /// VMOVUPD ymm, ymm/m256
/// </summary>
public static unsafe Vector256<double> LoadVector256(double* address) => LoadVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadAlignedVector256(sbyte* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadAlignedVector256(byte* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadAlignedVector256(short* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadAlignedVector256(ushort* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadAlignedVector256(int* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadAlignedVector256(uint* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadAlignedVector256(long* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_load_si256 (__m256i const * mem_addr); VMOVDQA ymm, m256
+ /// __m256i _mm256_load_si256 (__m256i const * mem_addr);
+ /// VMOVDQA ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadAlignedVector256(ulong* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256 _mm256_load_ps (float const * mem_addr); VMOVAPS ymm, ymm/m256
+ /// __m256 _mm256_load_ps (float const * mem_addr);
+ /// VMOVAPS ymm, ymm/m256
/// </summary>
public static unsafe Vector256<float> LoadAlignedVector256(float* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256d _mm256_load_pd (double const * mem_addr); VMOVAPD ymm, ymm/m256
+ /// __m256d _mm256_load_pd (double const * mem_addr);
+ /// VMOVAPD ymm, ymm/m256
/// </summary>
public static unsafe Vector256<double> LoadAlignedVector256(double* address) => LoadAlignedVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadDquVector256(sbyte* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadDquVector256(byte* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadDquVector256(short* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadDquVector256(ushort* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadDquVector256(int* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadDquVector256(uint* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadDquVector256(long* address) => LoadDquVector256(address);
/// <summary>
- /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr); VLDDQU ymm, m256
+ /// __m256i _mm256_lddqu_si256 (__m256i const * mem_addr);
+ /// VLDDQU ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadDquVector256(ulong* address) => LoadDquVector256(address);
/// <summary>
- /// __m128 _mm_maskload_ps (float const * mem_addr, __m128i mask); VMASKMOVPS xmm, xmm, m128
+ /// __m128 _mm_maskload_ps (float const * mem_addr, __m128i mask);
+ /// VMASKMOVPS xmm, xmm, m128
/// </summary>
public static unsafe Vector128<float> MaskLoad(float* address, Vector128<uint> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m128d _mm_maskload_pd (double const * mem_addr, __m128i mask); VMASKMOVPD xmm, xmm, m128
+ /// __m128d _mm_maskload_pd (double const * mem_addr, __m128i mask);
+ /// VMASKMOVPD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<double> MaskLoad(double* address, Vector128<ulong> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask); VMASKMOVPS ymm, ymm, m256
+ /// __m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask);
+ /// VMASKMOVPS ymm, ymm, m256
/// </summary>
public static unsafe Vector256<float> MaskLoad(float* address, Vector256<uint> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256d _mm256_maskload_pd (double const * mem_addr, __m256i mask); VMASKMOVPD ymm, ymm, m256
+ /// __m256d _mm256_maskload_pd (double const * mem_addr, __m256i mask);
+ /// VMASKMOVPD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<double> MaskLoad(double* address, Vector256<ulong> mask) => MaskLoad(address, mask);
/// <summary>
- /// void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a); VMASKMOVPS m128, xmm, xmm
+ /// void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a);
+ /// VMASKMOVPS m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(float* address, Vector128<float> mask, Vector128<uint> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm_maskstore_pd (double * mem_addr, __m128i mask, __m128d a); VMASKMOVPD m128, xmm, xmm
+ /// void _mm_maskstore_pd (double * mem_addr, __m128i mask, __m128d a);
+ /// VMASKMOVPD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(double* address, Vector128<double> mask, Vector128<ulong> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a); VMASKMOVPS m256, ymm, ymm
+ /// void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a);
+ /// VMASKMOVPS m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(float* address, Vector256<float> mask, Vector256<uint> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_pd (double * mem_addr, __m256i mask, __m256d a); VMASKMOVPD m256, ymm, ymm
+ /// void _mm256_maskstore_pd (double * mem_addr, __m256i mask, __m256d a);
+ /// VMASKMOVPD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(double* address, Vector256<double> mask, Vector256<ulong> source) => MaskStore(address, mask, source);
/// <summary>
- /// __m256 _mm256_max_ps (__m256 a, __m256 b); VMAXPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_max_ps (__m256 a, __m256 b);
+ /// VMAXPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Max(Vector256<float> left, Vector256<float> right) => Max(left, right);
/// <summary>
- /// __m256d _mm256_max_pd (__m256d a, __m256d b); VMAXPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_max_pd (__m256d a, __m256d b);
+ /// VMAXPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Max(Vector256<double> left, Vector256<double> right) => Max(left, right);
/// <summary>
- /// __m256 _mm256_min_ps (__m256 a, __m256 b); VMINPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_min_ps (__m256 a, __m256 b);
+ /// VMINPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Min(Vector256<float> left, Vector256<float> right) => Min(left, right);
/// <summary>
- /// __m256d _mm256_min_pd (__m256d a, __m256d b); VMINPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_min_pd (__m256d a, __m256d b);
+ /// VMINPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Min(Vector256<double> left, Vector256<double> right) => Min(left, right);
/// <summary>
- /// int _mm256_movemask_ps (__m256 a); VMOVMSKPS reg, ymm
+ /// int _mm256_movemask_ps (__m256 a);
+ /// VMOVMSKPS reg, ymm
/// </summary>
public static int MoveMask(Vector256<float> value) => MoveMask(value);
/// <summary>
- /// int _mm256_movemask_pd (__m256d a); VMOVMSKPD reg, ymm
+ /// int _mm256_movemask_pd (__m256d a);
+ /// VMOVMSKPD reg, ymm
/// </summary>
public static int MoveMask(Vector256<double> value) => MoveMask(value);
/// <summary>
- /// __m256 _mm256_mul_ps (__m256 a, __m256 b); VMULPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_mul_ps (__m256 a, __m256 b);
+ /// VMULPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Multiply(Vector256<float> left, Vector256<float> right) => Multiply(left, right);
/// <summary>
- /// __m256d _mm256_mul_pd (__m256d a, __m256d b); VMULPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_mul_pd (__m256d a, __m256d b);
+ /// VMULPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Multiply(Vector256<double> left, Vector256<double> right) => Multiply(left, right);
/// <summary>
- /// __m256 _mm256_or_ps (__m256 a, __m256 b); VORPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_or_ps (__m256 a, __m256 b);
+ /// VORPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Or(Vector256<float> left, Vector256<float> right) => Or(left, right);
/// <summary>
- /// __m256d _mm256_or_pd (__m256d a, __m256d b); VORPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_or_pd (__m256d a, __m256d b);
+ /// VORPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Or(Vector256<double> left, Vector256<double> right) => Or(left, right);
/// <summary>
- /// __m128 _mm_permute_ps (__m128 a, int imm8); VPERMILPS xmm, xmm, imm8
+ /// __m128 _mm_permute_ps (__m128 a, int imm8);
+ /// VPERMILPS xmm, xmm, imm8
/// </summary>
public static Vector128<float> Permute(Vector128<float> value, byte control) => Permute(value, control);
/// <summary>
- /// __m128d _mm_permute_pd (__m128d a, int imm8); VPERMILPD xmm, xmm, imm8
+ /// __m128d _mm_permute_pd (__m128d a, int imm8);
+ /// VPERMILPD xmm, xmm, imm8
/// </summary>
public static Vector128<double> Permute(Vector128<double> value, byte control) => Permute(value, control);
/// <summary>
- /// __m256 _mm256_permute_ps (__m256 a, int imm8); VPERMILPS ymm, ymm, imm8
+ /// __m256 _mm256_permute_ps (__m256 a, int imm8);
+ /// VPERMILPS ymm, ymm, imm8
/// </summary>
public static Vector256<float> Permute(Vector256<float> value, byte control) => Permute(value, control);
/// <summary>
- /// __m256d _mm256_permute_pd (__m256d a, int imm8); VPERMILPD ymm, ymm, imm8
+ /// __m256d _mm256_permute_pd (__m256d a, int imm8);
+ /// VPERMILPD ymm, ymm, imm8
/// </summary>
public static Vector256<double> Permute(Vector256<double> value, byte control) => Permute(value, control);
/// <summary>
- /// __m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
- /// __m256d _mm256_permute2f128_pd (__m256d a, __m256d b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
- /// __m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8); VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_permute2f128_pd (__m256d a, __m256d b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8);
+ /// VPERM2F128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<T> Permute2x128<T>(Vector256<T> left, Vector256<T> right, byte control) where T : struct
{
@@ -639,34 +786,41 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m128 _mm_permutevar_ps (__m128 a, __m128i b); VPERMILPS xmm, xmm, xmm/m128
+ /// __m128 _mm_permutevar_ps (__m128 a, __m128i b);
+ /// VPERMILPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> PermuteVar(Vector128<float> left, Vector128<float> mask) => PermuteVar(left, mask);
/// <summary>
- /// __m128d _mm_permutevar_pd (__m128d a, __m128i b); VPERMILPD xmm, xmm, xmm/m128
+ /// __m128d _mm_permutevar_pd (__m128d a, __m128i b);
+ /// VPERMILPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> PermuteVar(Vector128<double> left, Vector128<double> mask) => PermuteVar(left, mask);
/// <summary>
- /// __m256 _mm256_permutevar_ps (__m256 a, __m256i b); VPERMILPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_permutevar_ps (__m256 a, __m256i b);
+ /// VPERMILPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> PermuteVar(Vector256<float> left, Vector256<float> mask) => PermuteVar(left, mask);
/// <summary>
- /// __m256d _mm256_permutevar_pd (__m256d a, __m256i b); VPERMILPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_permutevar_pd (__m256d a, __m256i b);
+ /// VPERMILPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> PermuteVar(Vector256<double> left, Vector256<double> mask) => PermuteVar(left, mask);
/// <summary>
- /// __m256 _mm256_rcp_ps (__m256 a); VRCPPS ymm, ymm/m256
+ /// __m256 _mm256_rcp_ps (__m256 a);
+ /// VRCPPS ymm, ymm/m256
/// </summary>
public static Vector256<float> Reciprocal(Vector256<float> value) => Reciprocal(value);
/// <summary>
- /// __m256 _mm256_rsqrt_ps (__m256 a); VRSQRTPS ymm, ymm/m256
+ /// __m256 _mm256_rsqrt_ps (__m256 a);
+ /// VRSQRTPS ymm, ymm/m256
/// </summary>
public static Vector256<float> ReciprocalSqrt(Vector256<float> value) => ReciprocalSqrt(value);
/// <summary>
- /// __m256 _mm256_round_ps (__m256 a, int rounding); VROUNDPS ymm, ymm/m256, imm8(8)
+ /// __m256 _mm256_round_ps (__m256 a, int rounding);
+ /// VROUNDPS ymm, ymm/m256, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector256<float> RoundToNearestInteger(Vector256<float> value) => RoundToNearestInteger(value);
@@ -688,7 +842,8 @@ namespace System.Runtime.Intrinsics.X86
public static Vector256<float> RoundCurrentDirection(Vector256<float> value) => RoundCurrentDirection(value);
/// <summary>
- /// __m256d _mm256_round_pd (__m256d a, int rounding); VROUNDPD ymm, ymm/m256, imm8(8)
+ /// __m256d _mm256_round_pd (__m256d a, int rounding);
+ /// VROUNDPD ymm, ymm/m256, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector256<double> RoundToNearestInteger(Vector256<double> value) => RoundToNearestInteger(value);
@@ -710,53 +865,69 @@ namespace System.Runtime.Intrinsics.X86
public static Vector256<double> RoundCurrentDirection(Vector256<double> value) => RoundCurrentDirection(value);
/// <summary>
- /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector256<sbyte> SetVector256(sbyte e31, sbyte e30, sbyte e29, sbyte e28, sbyte e27, sbyte e26, sbyte e25, sbyte e24, sbyte e23, sbyte e22, sbyte e21, sbyte e20, sbyte e19, sbyte e18, sbyte e17, sbyte e16, sbyte e15, sbyte e14, sbyte e13, sbyte e12, sbyte e11, sbyte e10, sbyte e9, sbyte e8, sbyte e7, sbyte e6, sbyte e5, sbyte e4, sbyte e3, sbyte e2, sbyte e1, sbyte e0) => SetVector256(e31, e30, e29, e28, e27, e26, e25, e24, e23, e22, e21, e20, e19, e18, e17, e16, e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector256<byte> SetVector256(byte e31, byte e30, byte e29, byte e28, byte e27, byte e26, byte e25, byte e24, byte e23, byte e22, byte e21, byte e20, byte e19, byte e18, byte e17, byte e16, byte e15, byte e14, byte e13, byte e12, byte e11, byte e10, byte e9, byte e8, byte e7, byte e6, byte e5, byte e4, byte e3, byte e2, byte e1, byte e0) => SetVector256(e31, e30, e29, e28, e27, e26, e25, e24, e23, e22, e21, e20, e19, e18, e17, e16, e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector256<short> SetVector256(short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0) => SetVector256(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector256<ushort> SetVector256(ushort e15, ushort e14, ushort e13, ushort e12, ushort e11, ushort e10, ushort e9, ushort e8, ushort e7, ushort e6, ushort e5, ushort e4, ushort e3, ushort e2, ushort e1, ushort e0) => SetVector256(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0); HELPER
+ /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector256<int> SetVector256(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) => SetVector256(e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0); HELPER
+ /// __m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector256<uint> SetVector256(uint e7, uint e6, uint e5, uint e4, uint e3, uint e2, uint e1, uint e0) => SetVector256(e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0); HELPER
+ /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector256<long> SetVector256(long e3, long e2, long e1, long e0) => SetVector256(e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0); HELPER
+ /// __m256i _mm256_set_epi64x (__int64 e3, __int64 e2, __int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector256<ulong> SetVector256(ulong e3, ulong e2, ulong e1, ulong e0) => SetVector256(e3, e2, e1, e0);
/// <summary>
- /// __m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0); HELPER
+ /// __m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0);
+ /// HELPER
/// </summary>
public static Vector256<float> SetVector256(float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0) => SetVector256(e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m256d _mm256_set_pd (double e3, double e2, double e1, double e0); HELPER
+ /// __m256d _mm256_set_pd (double e3, double e2, double e1, double e0);
+ /// HELPER
/// </summary>
public static Vector256<double> SetVector256(double e3, double e2, double e1, double e0) => SetVector256(e3, e2, e1, e0);
/// <summary>
- /// __m256i _mm256_set1_epi8 (char a); HELPER
- /// __m256i _mm256_set1_epi16 (short a); HELPER
- /// __m256i _mm256_set1_epi32 (int a); HELPER
- /// __m256i _mm256_set1_epi64x (long long a); HELPER
- /// __m256 _mm256_set1_ps (float a); HELPER
- /// __m256d _mm256_set1_pd (double a); HELPER
+ /// __m256i _mm256_set1_epi8 (char a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi16 (short a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi32 (int a);
+ /// HELPER
+ /// __m256i _mm256_set1_epi64x (long long a);
+ /// HELPER
+ /// __m256 _mm256_set1_ps (float a);
+ /// HELPER
+ /// __m256d _mm256_set1_pd (double a);
+ /// HELPER
/// </summary>
public static Vector256<T> SetAllVector256<T>(T value) where T : struct
{
@@ -765,9 +936,12 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256 _mm256_set_m128 (__m128 hi, __m128 lo); HELPER
- /// __m256d _mm256_set_m128d (__m128d hi, __m128d lo); HELPER
- /// __m256i _mm256_set_m128i (__m128i hi, __m128i lo); HELPER
+ /// __m256 _mm256_set_m128 (__m128 hi, __m128 lo);
+ /// HELPER
+ /// __m256d _mm256_set_m128d (__m128d hi, __m128d lo);
+ /// HELPER
+ /// __m256i _mm256_set_m128i (__m128i hi, __m128i lo);
+ /// HELPER
/// </summary>
public static Vector256<T> SetHighLow<T>(Vector128<T> hi, Vector128<T> lo) where T : struct
{
@@ -776,9 +950,12 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256i _mm256_setzero_si256 (void); HELPER
- /// __m256 _mm256_setzero_ps (void); HELPER
- /// __m256d _mm256_setzero_pd (void); HELPER
+ /// __m256i _mm256_setzero_si256 (void);
+ /// HELPER
+ /// __m256 _mm256_setzero_ps (void);
+ /// HELPER
+ /// __m256d _mm256_setzero_pd (void);
+ /// HELPER
/// </summary>
public static Vector256<T> SetZeroVector256<T>() where T : struct
{
@@ -787,30 +964,40 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256 _mm256_shuffle_ps (__m256 a, __m256 b, const int imm8); VSHUFPS ymm, ymm, ymm/m256, imm8
+ /// __m256 _mm256_shuffle_ps (__m256 a, __m256 b, const int imm8);
+ /// VSHUFPS ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<float> Shuffle(Vector256<float> value, Vector256<float> right, byte control) => Shuffle(value, right, control);
/// <summary>
- /// __m256d _mm256_shuffle_pd (__m256d a, __m256d b, const int imm8); VSHUFPD ymm, ymm, ymm/m256, imm8
+ /// __m256d _mm256_shuffle_pd (__m256d a, __m256d b, const int imm8);
+ /// VSHUFPD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Shuffle(Vector256<double> value, Vector256<double> right, byte control) => Shuffle(value, right, control);
/// <summary>
- /// __m256 _mm256_sqrt_ps (__m256 a); VSQRTPS ymm, ymm/m256
+ /// __m256 _mm256_sqrt_ps (__m256 a);
+ /// VSQRTPS ymm, ymm/m256
/// </summary>
public static Vector256<float> Sqrt(Vector256<float> value) => Sqrt(value);
/// <summary>
- /// __m256d _mm256_sqrt_pd (__m256d a); VSQRTPD ymm, ymm/m256
+ /// __m256d _mm256_sqrt_pd (__m256d a);
+ /// VSQRTPD ymm, ymm/m256
/// </summary>
public static Vector256<double> Sqrt(Vector256<double> value) => Sqrt(value);
/// <summary>
- /// __m256 _mm256_castpd_ps (__m256d a); HELPER - No Codegen
- /// __m256i _mm256_castpd_si256 (__m256d a); HELPER - No Codegen
- /// __m256d _mm256_castps_pd (__m256 a); HELPER - No Codegen
- /// __m256i _mm256_castps_si256 (__m256 a); HELPER - No Codegen
- /// __m256d _mm256_castsi256_pd (__m256i a); HELPER - No Codegen
- /// __m256 _mm256_castsi256_ps (__m256i a); HELPER - No Codegen
+ /// __m256 _mm256_castpd_ps (__m256d a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castpd_si256 (__m256d a);
+ /// HELPER - No Codegen
+ /// __m256d _mm256_castps_pd (__m256 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_castps_si256 (__m256 a);
+ /// HELPER - No Codegen
+ /// __m256d _mm256_castsi256_pd (__m256i a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_castsi256_ps (__m256i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<U> StaticCast<T, U>(Vector256<T> value) where T : struct where U : struct
{
@@ -820,150 +1007,187 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(sbyte* address, Vector256<sbyte> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(byte* address, Vector256<byte> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(short* address, Vector256<short> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(ushort* address, Vector256<ushort> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(int* address, Vector256<int> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(uint* address, Vector256<uint> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(long* address, Vector256<long> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a); MOVDQA m256, ymm
+ /// void _mm256_store_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQA m256, ymm
/// </summary>
public static unsafe void StoreAligned(ulong* address, Vector256<ulong> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_ps (float * mem_addr, __m256 a); VMOVAPS m256, ymm
+ /// void _mm256_store_ps (float * mem_addr, __m256 a);
+ /// VMOVAPS m256, ymm
/// </summary>
public static unsafe void StoreAligned(float* address, Vector256<float> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_store_pd (double * mem_addr, __m256d a); VMOVAPD m256, ymm
+ /// void _mm256_store_pd (double * mem_addr, __m256d a);
+ /// VMOVAPD m256, ymm
/// </summary>
public static unsafe void StoreAligned(double* address, Vector256<double> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(sbyte* address, Vector256<sbyte> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(byte* address, Vector256<byte> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(short* address, Vector256<short> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ushort* address, Vector256<ushort> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(int* address, Vector256<int> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(uint* address, Vector256<uint> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(long* address, Vector256<long> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a); VMOVNTDQ m256, ymm
+ /// void _mm256_stream_si256 (__m256i * mem_addr, __m256i a);
+ /// VMOVNTDQ m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ulong* address, Vector256<ulong> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_ps (float * mem_addr, __m256 a); MOVNTPS m256, ymm
+ /// void _mm256_stream_ps (float * mem_addr, __m256 a);
+ /// VMOVNTPS m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(float* address, Vector256<float> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_stream_pd (double * mem_addr, __m256d a); MOVNTPD m256, ymm
+ /// void _mm256_stream_pd (double * mem_addr, __m256d a);
+ /// VMOVNTPD m256, ymm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(double* address, Vector256<double> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(sbyte* address, Vector256<sbyte> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(byte* address, Vector256<byte> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(short* address, Vector256<short> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(ushort* address, Vector256<ushort> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(int* address, Vector256<int> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(uint* address, Vector256<uint> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(long* address, Vector256<long> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a); MOVDQU m256, ymm
+ /// void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a);
+ /// MOVDQU m256, ymm
/// </summary>
public static unsafe void Store(ulong* address, Vector256<ulong> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_ps (float * mem_addr, __m256 a); MOVUPS m256, ymm
+ /// void _mm256_storeu_ps (float * mem_addr, __m256 a);
+ /// MOVUPS m256, ymm
/// </summary>
public static unsafe void Store(float* address, Vector256<float> source) => Store(address, source);
/// <summary>
- /// void _mm256_storeu_pd (double * mem_addr, __m256d a); MOVUPD m256, ymm
+ /// void _mm256_storeu_pd (double * mem_addr, __m256d a);
+ /// MOVUPD m256, ymm
/// </summary>
public static unsafe void Store(double* address, Vector256<double> source) => Store(address, source);
/// <summary>
- /// __m256 _mm256_sub_ps (__m256 a, __m256 b); VSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_sub_ps (__m256 a, __m256 b);
+ /// VSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Subtract(Vector256<float> left, Vector256<float> right) => Subtract(left, right);
/// <summary>
- /// __m256d _mm256_sub_pd (__m256d a, __m256d b); VSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_sub_pd (__m256d a, __m256d b);
+ /// VSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Subtract(Vector256<double> left, Vector256<double> right) => Subtract(left, right);
/// <summary>
- /// int _mm_testc_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testc_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<float> left, Vector128<float> right) => TestC(left, right);
/// <summary>
- /// int _mm_testc_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testc_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<double> left, Vector128<double> right) => TestC(left, right);
/// <summary>
- /// int _mm256_testc_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testc_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testc_pd (__m256d a, __m256d b); VTESTPS ymm, ymm/m256
+ /// int _mm256_testc_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testc_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testc_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestC<T>(Vector256<T> left, Vector256<T> right) where T : struct
{
@@ -972,18 +1196,23 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// int _mm_testnzc_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testnzc_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<float> left, Vector128<float> right) => TestNotZAndNotC(left, right);
/// <summary>
- /// int _mm_testnzc_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testnzc_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<double> left, Vector128<double> right) => TestNotZAndNotC(left, right);
/// <summary>
- /// int _mm256_testnzc_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testnzc_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testnzc_pd (__m256d a, __m256d b); VTESTPD ymm, ymm/m256
+ /// int _mm256_testnzc_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testnzc_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testnzc_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestNotZAndNotC<T>(Vector256<T> left, Vector256<T> right) where T : struct
{
@@ -992,18 +1221,23 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// int _mm_testz_ps (__m128 a, __m128 b); VTESTPS xmm, xmm/m128
+ /// int _mm_testz_ps (__m128 a, __m128 b);
+ /// VTESTPS xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<float> left, Vector128<float> right) => TestZ(left, right);
/// <summary>
- /// int _mm_testz_pd (__m128d a, __m128d b); VTESTPD xmm, xmm/m128
+ /// int _mm_testz_pd (__m128d a, __m128d b);
+ /// VTESTPD xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<double> left, Vector128<double> right) => TestZ(left, right);
/// <summary>
- /// int _mm256_testz_si256 (__m256i a, __m256i b); VPTEST ymm, ymm/m256
- /// int _mm256_testz_ps (__m256 a, __m256 b); VTESTPS ymm, ymm/m256
- /// int _mm256_testz_pd (__m256d a, __m256d b); VTESTPD ymm, ymm/m256
+ /// int _mm256_testz_si256 (__m256i a, __m256i b);
+ /// VPTEST ymm, ymm/m256
+ /// int _mm256_testz_ps (__m256 a, __m256 b);
+ /// VTESTPS ymm, ymm/m256
+ /// int _mm256_testz_pd (__m256d a, __m256d b);
+ /// VTESTPD ymm, ymm/m256
/// </summary>
public static bool TestZ<T>(Vector256<T> left, Vector256<T> right) where T : struct
{
@@ -1012,45 +1246,56 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256 _mm256_unpackhi_ps (__m256 a, __m256 b); VUNPCKHPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_unpackhi_ps (__m256 a, __m256 b);
+ /// VUNPCKHPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> UnpackHigh(Vector256<float> left, Vector256<float> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256d _mm256_unpackhi_pd (__m256d a, __m256d b); VUNPCKHPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_unpackhi_pd (__m256d a, __m256d b);
+ /// VUNPCKHPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> UnpackHigh(Vector256<double> left, Vector256<double> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256 _mm256_unpacklo_ps (__m256 a, __m256 b); VUNPCKLPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_unpacklo_ps (__m256 a, __m256 b);
+ /// VUNPCKLPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> UnpackLow(Vector256<float> left, Vector256<float> right) => UnpackLow(left, right);
/// <summary>
- /// __m256d _mm256_unpacklo_pd (__m256d a, __m256d b); VUNPCKLPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_unpacklo_pd (__m256d a, __m256d b);
+ /// VUNPCKLPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> UnpackLow(Vector256<double> left, Vector256<double> right) => UnpackLow(left, right);
/// <summary>
- /// __m256 _mm256_xor_ps (__m256 a, __m256 b); VXORPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_xor_ps (__m256 a, __m256 b);
+ /// VXORPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> Xor(Vector256<float> left, Vector256<float> right) => Xor(left, right);
/// <summary>
- /// __m256d _mm256_xor_pd (__m256d a, __m256d b); VXORPS ymm, ymm, ymm/m256
+ /// __m256d _mm256_xor_pd (__m256d a, __m256d b);
+ /// VXORPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> Xor(Vector256<double> left, Vector256<double> right) => Xor(left, right);
/// <summary>
- /// void _mm256_zeroall (void); VZEROALL
+ /// void _mm256_zeroall (void);
+ /// VZEROALL
/// </summary>
public static void ZeroAll() => ZeroAll();
/// <summary>
- /// void _mm256_zeroupper (void); VZEROUPPER
+ /// void _mm256_zeroupper (void);
+ /// VZEROUPPER
/// </summary>
public static void ZeroUpper() => ZeroUpper();
/// <summary>
- /// __m256d _mm256_zextpd128_pd256 (__m128d a); HELPER - No Codegen
- /// __m256 _mm256_zextps128_ps256 (__m128 a); HELPER - No Codegen
- /// __m256i _mm256_zextsi128_si256 (__m128i a); HELPER - No Codegen
+ /// __m256d _mm256_zextpd128_pd256 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m256 _mm256_zextps128_ps256 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m256i _mm256_zextsi128_si256 (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector256<T> ZeroExtendToVector256<T>(Vector128<T> value) where T : struct
{
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.PlatformNotSupported.cs
index 5836a2b179..45e0f51c48 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.PlatformNotSupported.cs
@@ -16,336 +16,423 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m256i _mm256_abs_epi8 (__m256i a); VPABSB ymm, ymm/m256
+ /// __m256i _mm256_abs_epi8 (__m256i a);
+ /// VPABSB ymm, ymm/m256
/// </summary>
public static Vector256<byte> Abs(Vector256<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_abs_epi16 (__m256i a); VPABSW ymm, ymm/m256
+ /// __m256i _mm256_abs_epi16 (__m256i a);
+ /// VPABSW ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Abs(Vector256<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_abs_epi32 (__m256i a); VPABSD ymm, ymm/m256
+ /// __m256i _mm256_abs_epi32 (__m256i a);
+ /// VPABSD ymm, ymm/m256
/// </summary>
public static Vector256<uint> Abs(Vector256<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi8 (__m256i a, __m256i b); VPADDB ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi8 (__m256i a, __m256i b);
+ /// VPADDB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Add(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi8 (__m256i a, __m256i b); VPADDB ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi8 (__m256i a, __m256i b);
+ /// VPADDB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Add(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi16 (__m256i a, __m256i b); VPADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi16 (__m256i a, __m256i b);
+ /// VPADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Add(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi16 (__m256i a, __m256i b); VPADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi16 (__m256i a, __m256i b);
+ /// VPADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Add(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi32 (__m256i a, __m256i b); VPADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi32 (__m256i a, __m256i b);
+ /// VPADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Add(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi32 (__m256i a, __m256i b); VPADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi32 (__m256i a, __m256i b);
+ /// VPADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Add(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi64 (__m256i a, __m256i b); VPADDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi64 (__m256i a, __m256i b);
+ /// VPADDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Add(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_add_epi64 (__m256i a, __m256i b); VPADDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi64 (__m256i a, __m256i b);
+ /// VPADDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Add(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_adds_epi8 (__m256i a, __m256i b); VPADDSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epi8 (__m256i a, __m256i b);
+ /// VPADDSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> AddSaturate(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_adds_epu8 (__m256i a, __m256i b); VPADDUSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epu8 (__m256i a, __m256i b);
+ /// VPADDUSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> AddSaturate(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_adds_epi16 (__m256i a, __m256i b); VPADDSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epi16 (__m256i a, __m256i b);
+ /// VPADDSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> AddSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_adds_epu16 (__m256i a, __m256i b); VPADDUSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epu16 (__m256i a, __m256i b);
+ /// VPADDUSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> AddSaturate(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count); VPALIGNR ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count);
+ /// VPALIGNR ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<sbyte> AlignRight(Vector256<sbyte> left, Vector256<sbyte> right, byte mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> And(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> And(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> And(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> And(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> And(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> And(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> And(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> And(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> AndNot(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> AndNot(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> AndNot(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> AndNot(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> AndNot(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> AndNot(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> AndNot(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> AndNot(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_avg_epu8 (__m256i a, __m256i b); VPAVGB ymm, ymm, ymm/m256
+ /// __m256i _mm256_avg_epu8 (__m256i a, __m256i b);
+ /// VPAVGB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Average(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_avg_epu16 (__m256i a, __m256i b); VPAVGW ymm, ymm, ymm/m256
+ /// __m256i _mm256_avg_epu16 (__m256i a, __m256i b);
+ /// VPAVGW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Average(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8); VPBLENDD xmm, xmm, xmm/m128, imm8
+ /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8);
+ /// VPBLENDD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<int> Blend(Vector128<int> left, Vector128<int> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8); VPBLENDD xmm, xmm, xmm/m128, imm8
+ /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8);
+ /// VPBLENDD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<uint> Blend(Vector128<uint> left, Vector128<uint> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8); VPBLENDW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> Blend(Vector256<short> left, Vector256<short> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8); VPBLENDW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> Blend(Vector256<ushort> left, Vector256<ushort> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8); VPBLENDD ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Blend(Vector256<int> left, Vector256<int> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8); VPBLENDD ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Blend(Vector256<uint> left, Vector256<uint> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask); PBLENDVB ymm, ymm, ymm/m256, ymm
+ /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask);
+ /// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<sbyte> BlendVariable(Vector256<sbyte> left, Vector256<sbyte> right, Vector256<sbyte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask); PBLENDVB ymm, ymm, ymm/m256, ymm
+ /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask);
+ /// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<byte> BlendVariable(Vector256<byte> left, Vector256<byte> right, Vector256<byte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_broadcastb_epi8 (__m128i a); VPBROADCASTB xmm, xmm
- /// __m128i _mm_broadcastw_epi16 (__m128i a); VPBROADCASTW xmm, xmm
- /// __m128i _mm_broadcastd_epi32 (__m128i a); VPBROADCASTD xmm, xmm
- /// __m128i _mm_broadcastq_epi64 (__m128i a); VPBROADCASTQ xmm, xmm
- /// __m128 _mm_broadcastss_ps (__m128 a); VBROADCASTSS xmm, xmm
- /// __m128d _mm_broadcastsd_pd (__m128d a); VBROADCASTSD xmm, xmm
+ /// __m128i _mm_broadcastb_epi8 (__m128i a);
+ /// VPBROADCASTB xmm, xmm
+ /// __m128i _mm_broadcastw_epi16 (__m128i a);
+ /// VPBROADCASTW xmm, xmm
+ /// __m128i _mm_broadcastd_epi32 (__m128i a);
+ /// VPBROADCASTD xmm, xmm
+ /// __m128i _mm_broadcastq_epi64 (__m128i a);
+ /// VPBROADCASTQ xmm, xmm
+ /// __m128 _mm_broadcastss_ps (__m128 a);
+ /// VBROADCASTSS xmm, xmm
+ /// __m128d _mm_broadcastsd_pd (__m128d a);
+ /// VBROADCASTSD xmm, xmm
/// </summary>
public static Vector128<T> BroadcastElementToVector128<T>(Vector128<T> value) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastb_epi8 (__m128i a); VPBROADCASTB ymm, xmm
- /// __m256i _mm256_broadcastw_epi16 (__m128i a); VPBROADCASTW ymm, xmm
- /// __m256i _mm256_broadcastd_epi32 (__m128i a); VPBROADCASTD ymm, xmm
- /// __m256i _mm256_broadcastq_epi64 (__m128i a); VPBROADCASTQ ymm, xmm
- /// __m256 _mm256_broadcastss_ps (__m128 a); VBROADCASTSS ymm, xmm
- /// __m256d _mm256_broadcastsd_pd (__m128d a); VBROADCASTSD ymm, xmm
+ /// __m256i _mm256_broadcastb_epi8 (__m128i a);
+ /// VPBROADCASTB ymm, xmm
+ /// __m256i _mm256_broadcastw_epi16 (__m128i a);
+ /// VPBROADCASTW ymm, xmm
+ /// __m256i _mm256_broadcastd_epi32 (__m128i a);
+ /// VPBROADCASTD ymm, xmm
+ /// __m256i _mm256_broadcastq_epi64 (__m128i a);
+ /// VPBROADCASTQ ymm, xmm
+ /// __m256 _mm256_broadcastss_ps (__m128 a);
+ /// VBROADCASTSS ymm, xmm
+ /// __m256d _mm256_broadcastsd_pd (__m128d a);
+ /// VBROADCASTSD ymm, xmm
/// </summary>
public static Vector256<T> BroadcastElementToVector256<T>(Vector128<T> value) where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m8
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<sbyte> BroadcastVector128ToVector256(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m8
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<byte> BroadcastVector128ToVector256(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m16
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<short> BroadcastVector128ToVector256(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m16
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<ushort> BroadcastVector128ToVector256(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m32
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<int> BroadcastVector128ToVector256(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m32
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<uint> BroadcastVector128ToVector256(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m64
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<long> BroadcastVector128ToVector256(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m64
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<ulong> BroadcastVector128ToVector256(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b); VPCMPEQB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b);
+ /// VPCMPEQB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> CompareEqual(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b); VPCMPEQB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b);
+ /// VPCMPEQB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> CompareEqual(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b); VPCMPEQW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b);
+ /// VPCMPEQW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> CompareEqual(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b); VPCMPEQW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b);
+ /// VPCMPEQW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> CompareEqual(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b); VPCMPEQD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b);
+ /// VPCMPEQD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> CompareEqual(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b); VPCMPEQD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b);
+ /// VPCMPEQD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> CompareEqual(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b); VPCMPEQQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b);
+ /// VPCMPEQQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> CompareEqual(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b); VPCMPEQQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b);
+ /// VPCMPEQQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> CompareEqual(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b); VPCMPGTB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b);
+ /// VPCMPGTB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> CompareGreaterThan(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b); VPCMPGTW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b);
+ /// VPCMPGTW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> CompareGreaterThan(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b); VPCMPGTD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b);
+ /// VPCMPGTD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> CompareGreaterThan(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b); VPCMPGTQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b);
+ /// VPCMPGTQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> CompareGreaterThan(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// double _mm256_cvtsd_f64 (__m256d a); HELPER: MOVSD
+ /// double _mm256_cvtsd_f64 (__m256d a);
+ /// HELPER: MOVSD
/// </summary>
public static double ConvertToDouble(Vector256<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_cvtsi256_si32 (__m256i a); MOVD reg/m32, xmm
+ /// int _mm256_cvtsi256_si32 (__m256i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static int ConvertToInt32(Vector256<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_cvtsi256_si32 (__m256i a); MOVD reg/m32, xmm
+ /// int _mm256_cvtsi256_si32 (__m256i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static uint ConvertToUInt32(Vector256<uint> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi8_epi16 (__m128i a); VPMOVSXBW ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi16 (__m128i a);
+ /// VPMOVSXBW ymm, xmm/m128
/// </summary>
public static Vector256<short> ConvertToVector256Int16(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepu8_epi16 (__m128i a); VPMOVZXBW ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi16 (__m128i a);
+ /// VPMOVZXBW ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ConvertToVector256UInt16(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi8_epi32 (__m128i a); VPMOVSXBD ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi32 (__m128i a);
+ /// VPMOVSXBD ymm, xmm/m128
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi16_epi32 (__m128i a); VPMOVSXWD ymm, xmm/m128
+ /// __m256i _mm256_cvtepi16_epi32 (__m128i a);
+ /// VPMOVSXWD ymm, xmm/m128
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector128<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepu8_epi32 (__m128i a); VPMOVZXBD ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi32 (__m128i a);
+ /// VPMOVZXBD ymm, xmm/m128
/// </summary>
public static Vector256<uint> ConvertToVector256UInt32(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepu16_epi32 (__m128i a); VPMOVZXWD ymm, xmm/m128
+ /// __m256i _mm256_cvtepu16_epi32 (__m128i a);
+ /// VPMOVZXWD ymm, xmm/m128
/// </summary>
public static Vector256<uint> ConvertToVector256UInt32(Vector128<ushort> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi8_epi64 (__m128i a); VPMOVSXBQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi64 (__m128i a);
+ /// VPMOVSXBQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi16_epi64 (__m128i a); VPMOVSXWQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi16_epi64 (__m128i a);
+ /// VPMOVSXWQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepi32_epi64 (__m128i a); VPMOVSXDQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi32_epi64 (__m128i a);
+ /// VPMOVSXDQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepu8_epi64 (__m128i a); VPMOVZXBQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi64 (__m128i a);
+ /// VPMOVZXBQ ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
@@ -353,1153 +440,1424 @@ namespace System.Runtime.Intrinsics.X86
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<ushort> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_cvtepu32_epi64 (__m128i a); VPMOVZXDQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepu32_epi64 (__m128i a);
+ /// VPMOVZXDQ ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<uint> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<sbyte> ExtractVector128(Vector256<sbyte> value, byte index) { throw new PlatformNotSupportedException(); }
// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(sbyte* address, Vector256<sbyte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<byte> ExtractVector128(Vector256<byte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(byte* address, Vector256<byte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<short> ExtractVector128(Vector256<short> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(short* address, Vector256<short> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<ushort> ExtractVector128(Vector256<ushort> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ushort* address, Vector256<ushort> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<int> ExtractVector128(Vector256<int> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(int* address, Vector256<int> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<uint> ExtractVector128(Vector256<uint> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(uint* address, Vector256<uint> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<long> ExtractVector128(Vector256<long> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(long* address, Vector256<long> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<ulong> ExtractVector128(Vector256<ulong> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ulong* address, Vector256<ulong> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<long> GatherVector128(long* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherVector128(ulong* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_i32gather_ps (float const* base_addr, __m128i vindex, const int scale); VGATHERDPS xmm, vm32x, xmm
+ /// __m128 _mm_i32gather_ps (float const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPS xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_i32gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERDPD xmm, vm32x, xmm
+ /// __m128d _mm_i32gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<double> GatherVector128(double* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<long> GatherVector128(long* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherVector128(ulong* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_i64gather_ps (float const* base_addr, __m128i vindex, const int scale); VGATHERQPS xmm, vm64x, xmm
+ /// __m128 _mm_i64gather_ps (float const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERQPS xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_i64gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERQPD xmm, vm64x, xmm
+ /// __m128d _mm_i64gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERQPD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<double> GatherVector128(double* baseAddress, Vector128<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<int> GatherVector256(int* baseAddress, Vector256<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<uint> GatherVector256(uint* baseAddress, Vector256<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<long> GatherVector256(long* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherVector256(ulong* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_i32gather_ps (float const* base_addr, __m256i vindex, const int scale); VGATHERDPS ymm, vm32y, ymm
+ /// __m256 _mm256_i32gather_ps (float const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERDPS ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<float> GatherVector256(float* baseAddress, Vector256<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_i32gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERDPD ymm, vm32y, ymm
+ /// __m256d _mm256_i32gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<double> GatherVector256(double* baseAddress, Vector128<int> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERQD ymm, vm64y, ymm
+ /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERQD ymm, vm64y, ymm
+ /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale); VPGATHERQQ ymm, vm64y, ymm
+ /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<long> GatherVector256(long* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale); VPGATHERQQ ymm, vm64y, ymm
+ /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherVector256(ulong* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm256_i64gather_ps (float const* base_addr, __m256i vindex, const int scale); VGATHERQPS ymm, vm64y, ymm
+ /// __m128 _mm256_i64gather_ps (float const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERQPS xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_i64gather_pd (double const* base_addr, __m256i vindex, const int scale); VGATHERQPD ymm, vm64y, ymm
+ /// __m256d _mm256_i64gather_pd (double const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERQPD ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<double> GatherVector256(double* baseAddress, Vector256<long> index, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector128<int> index, Vector128<int> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector128<int> index, Vector128<uint> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<long> GatherMaskVector128(Vector128<long> source, long* baseAddress, Vector128<int> index, Vector128<long> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherMaskVector128(Vector128<ulong> source, ulong* baseAddress, Vector128<int> index, Vector128<ulong> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_mask_i32gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale); VGATHERDPS xmm, vm32x, xmm
+ /// __m128 _mm_mask_i32gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale);
+ /// VGATHERDPS xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector128<int> index, Vector128<float> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_mask_i32gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale); VGATHERDPD xmm, vm32x, xmm
+ /// __m128d _mm_mask_i32gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale);
+ /// VGATHERDPD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<double> GatherMaskVector128(Vector128<double> source, double* baseAddress, Vector128<int> index, Vector128<double> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector128<long> index, Vector128<int> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector128<long> index, Vector128<uint> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<long> GatherMaskVector128(Vector128<long> source, long* baseAddress, Vector128<long> index, Vector128<long> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherMaskVector128(Vector128<ulong> source, ulong* baseAddress, Vector128<long> index, Vector128<ulong> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_mask_i64gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale); VPGATHERQPS xmm, vm64x, xmm
+ /// __m128 _mm_mask_i64gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale);
+ /// VGATHERQPS xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector128<long> index, Vector128<float> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_mask_i64gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale); VPGATHERQPD xmm, vm64x, xmm
+ /// __m128d _mm_mask_i64gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale);
+ /// VGATHERQPD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<double> GatherMaskVector128(Vector128<double> source, double* baseAddress, Vector128<long> index, Vector128<double> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<int> GatherMaskVector256(Vector256<int> source, int* baseAddress, Vector256<int> index, Vector256<int> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<uint> GatherMaskVector256(Vector256<uint> source, uint* baseAddress, Vector256<int> index, Vector256<uint> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<long> GatherMaskVector256(Vector256<long> source, long* baseAddress, Vector128<int> index, Vector256<long> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherMaskVector256(Vector256<ulong> source, ulong* baseAddress, Vector128<int> index, Vector256<ulong> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_mask_i32gather_ps (__m256 src, float const* base_addr, __m256i vindex, __m256 mask, const int scale); VPGATHERDPS ymm, vm32y, ymm
+ /// __m256 _mm256_mask_i32gather_ps (__m256 src, float const* base_addr, __m256i vindex, __m256 mask, const int scale);
+ /// VGATHERDPS ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<float> GatherMaskVector256(Vector256<float> source, float* baseAddress, Vector256<int> index, Vector256<float> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_mask_i32gather_pd (__m256d src, double const* base_addr, __m128i vindex, __m256d mask, const int scale); VPGATHERDPD ymm, vm32y, ymm
+ /// __m256d _mm256_mask_i32gather_pd (__m256d src, double const* base_addr, __m128i vindex, __m256d mask, const int scale);
+ /// VGATHERDPD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<double> GatherMaskVector256(Vector256<double> source, double* baseAddress, Vector128<int> index, Vector256<double> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale); VPGATHERQD ymm, vm32y, ymm
+ /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector256<long> index, Vector128<int> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale); VPGATHERQD ymm, vm32y, ymm
+ /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector256<long> index, Vector128<uint> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERQQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<long> GatherMaskVector256(Vector256<long> source, long* baseAddress, Vector256<long> index, Vector256<long> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERQQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherMaskVector256(Vector256<ulong> source, ulong* baseAddress, Vector256<long> index, Vector256<ulong> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm256_mask_i64gather_ps (__m128 src, float const* base_addr, __m256i vindex, __m128 mask, const int scale); VPGATHERQPS ymm, vm32y, ymm
+ /// __m128 _mm256_mask_i64gather_ps (__m128 src, float const* base_addr, __m256i vindex, __m128 mask, const int scale);
+ /// VGATHERQPS xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector256<long> index, Vector128<float> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_mask_i64gather_pd (__m256d src, double const* base_addr, __m256i vindex, __m256d mask, const int scale); VPGATHERQPD ymm, vm32y, ymm
+ /// __m256d _mm256_mask_i64gather_pd (__m256d src, double const* base_addr, __m256i vindex, __m256d mask, const int scale);
+ /// VGATHERQPD ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<double> GatherMaskVector256(Vector256<double> source, double* baseAddress, Vector256<long> index, Vector256<double> mask, byte scale) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hadd_epi16 (__m256i a, __m256i b); VPHADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadd_epi16 (__m256i a, __m256i b);
+ /// VPHADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalAdd(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hadd_epi32 (__m256i a, __m256i b); VPHADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadd_epi32 (__m256i a, __m256i b);
+ /// VPHADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> HorizontalAdd(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hadds_epi16 (__m256i a, __m256i b); VPHADDSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadds_epi16 (__m256i a, __m256i b);
+ /// VPHADDSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalAddSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hsub_epi16 (__m256i a, __m256i b); VPHSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsub_epi16 (__m256i a, __m256i b);
+ /// VPHSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalSubtract(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hsub_epi32 (__m256i a, __m256i b); VPHSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsub_epi32 (__m256i a, __m256i b);
+ /// VPHSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> HorizontalSubtract(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_hsubs_epi16 (__m256i a, __m256i b); VPHSUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsubs_epi16 (__m256i a, __m256i b);
+ /// VPHSUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalSubtractSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<sbyte> Insert(Vector256<sbyte> value, Vector128<sbyte> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xm128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xm128, imm8
/// </summary>
public static unsafe Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<byte> Insert(Vector256<byte> value, Vector128<byte> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<byte> Insert(Vector256<byte> value, byte* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<short> Insert(Vector256<short> value, Vector128<short> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<short> Insert(Vector256<short> value, short* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<ushort> Insert(Vector256<ushort> value, Vector128<ushort> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ushort> Insert(Vector256<ushort> value, ushort* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<int> Insert(Vector256<int> value, Vector128<int> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<int> Insert(Vector256<int> value, int* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<uint> Insert(Vector256<uint> value, Vector128<uint> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<uint> Insert(Vector256<uint> value, uint* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<long> Insert(Vector256<long> value, Vector128<long> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<long> Insert(Vector256<long> value, long* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<ulong> Insert(Vector256<ulong> value, Vector128<ulong> data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ulong> Insert(Vector256<ulong> value, ulong* address, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadAlignedVector256NonTemporal(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadAlignedVector256NonTemporal(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadAlignedVector256NonTemporal(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadAlignedVector256NonTemporal(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadAlignedVector256NonTemporal(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadAlignedVector256NonTemporal(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadAlignedVector256NonTemporal(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadAlignedVector256NonTemporal(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask); VPMASKMOVD xmm, xmm, m128
+ /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask);
+ /// VPMASKMOVD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<int> MaskLoad(int* address, Vector128<int> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask); VPMASKMOVD xmm, xmm, m128
+ /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask);
+ /// VPMASKMOVD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<uint> MaskLoad(uint* address, Vector128<uint> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask); VPMASKMOVQ xmm, xmm, m128
+ /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask);
+ /// VPMASKMOVQ xmm, xmm, m128
/// </summary>
public static unsafe Vector128<long> MaskLoad(long* address, Vector128<long> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask); VPMASKMOVQ xmm, xmm, m128
+ /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask);
+ /// VPMASKMOVQ xmm, xmm, m128
/// </summary>
public static unsafe Vector128<ulong> MaskLoad(ulong* address, Vector128<ulong> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask); VPMASKMOVD ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask);
+ /// VPMASKMOVD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<int> MaskLoad(int* address, Vector256<int> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask); VPMASKMOVD ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask);
+ /// VPMASKMOVD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<uint> MaskLoad(uint* address, Vector256<uint> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask); VPMASKMOVQ ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask);
+ /// VPMASKMOVQ ymm, ymm, m256
/// </summary>
public static unsafe Vector256<long> MaskLoad(long* address, Vector256<long> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask); VPMASKMOVQ ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask);
+ /// VPMASKMOVQ ymm, ymm, m256
/// </summary>
public static unsafe Vector256<ulong> MaskLoad(ulong* address, Vector256<ulong> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a); VPMASKMOVD m128, xmm, xmm
+ /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(int* address, Vector128<int> mask, Vector128<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a); VPMASKMOVD m128, xmm, xmm
+ /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(uint* address, Vector128<uint> mask, Vector128<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a); VPMASKMOVQ m128, xmm, xmm
+ /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVQ m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(long* address, Vector128<long> mask, Vector128<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a); VPMASKMOVQ m128, xmm, xmm
+ /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVQ m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(ulong* address, Vector128<ulong> mask, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a); VPMASKMOVD m256, ymm, ymm
+ /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(int* address, Vector256<int> mask, Vector256<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a); VPMASKMOVD m256, ymm, ymm
+ /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(uint* address, Vector256<uint> mask, Vector256<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a); VPMASKMOVQ m256, ymm, ymm
+ /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVQ m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(long* address, Vector256<long> mask, Vector256<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a); VPMASKMOVQ m256, ymm, ymm
+ /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVQ m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(ulong* address, Vector256<ulong> mask, Vector256<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_madd_epi16 (__m256i a, __m256i b); VPMADDWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_madd_epi16 (__m256i a, __m256i b);
+ /// VPMADDWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> MultiplyAddAdjacent(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_maddubs_epi16 (__m256i a, __m256i b); VPMADDUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_maddubs_epi16 (__m256i a, __m256i b);
+ /// VPMADDUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyAddAdjacent(Vector256<byte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epi8 (__m256i a, __m256i b); VPMAXSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi8 (__m256i a, __m256i b);
+ /// VPMAXSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Max(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epu8 (__m256i a, __m256i b); VPMAXUB ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu8 (__m256i a, __m256i b);
+ /// VPMAXUB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Max(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epi16 (__m256i a, __m256i b); VPMAXSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi16 (__m256i a, __m256i b);
+ /// VPMAXSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Max(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epu16 (__m256i a, __m256i b); VPMAXUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu16 (__m256i a, __m256i b);
+ /// VPMAXUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Max(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epi32 (__m256i a, __m256i b); VPMAXSD ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi32 (__m256i a, __m256i b);
+ /// VPMAXSD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Max(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_max_epu32 (__m256i a, __m256i b); VPMAXUD ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu32 (__m256i a, __m256i b);
+ /// VPMAXUD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Max(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epi8 (__m256i a, __m256i b); VPMINSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi8 (__m256i a, __m256i b);
+ /// VPMINSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Min(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epu8 (__m256i a, __m256i b); VPMINUB ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu8 (__m256i a, __m256i b);
+ /// VPMINUB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Min(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epi16 (__m256i a, __m256i b); VPMINSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi16 (__m256i a, __m256i b);
+ /// VPMINSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Min(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epu16 (__m256i a, __m256i b); VPMINUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu16 (__m256i a, __m256i b);
+ /// VPMINUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Min(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epi32 (__m256i a, __m256i b); VPMINSD ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi32 (__m256i a, __m256i b);
+ /// VPMINSD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Min(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_min_epu32 (__m256i a, __m256i b); VPMINUD ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu32 (__m256i a, __m256i b);
+ /// VPMINUD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Min(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_movemask_epi8 (__m256i a); VPMOVMSKB reg, ymm
+ /// int _mm256_movemask_epi8 (__m256i a);
+ /// VPMOVMSKB reg, ymm
/// </summary>
public static int MoveMask(Vector256<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm256_movemask_epi8 (__m256i a); VPMOVMSKB reg, ymm
+ /// int _mm256_movemask_epi8 (__m256i a);
+ /// VPMOVMSKB reg, ymm
/// </summary>
public static int MoveMask(Vector256<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mpsadbw_epu8 (__m256i a, __m256i b, const int imm8); VMPSADBW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_mpsadbw_epu8 (__m256i a, __m256i b, const int imm8);
+ /// VMPSADBW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> MultipleSumAbsoluteDifferences(Vector256<byte> left, Vector256<byte> right, byte mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mul_epi32 (__m256i a, __m256i b); VPMULDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_mul_epi32 (__m256i a, __m256i b);
+ /// VPMULDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Multiply(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mul_epu32 (__m256i a, __m256i b); VPMULUDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_mul_epu32 (__m256i a, __m256i b);
+ /// VPMULUDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Multiply(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mulhi_epi16 (__m256i a, __m256i b); VPMULHW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhi_epi16 (__m256i a, __m256i b);
+ /// VPMULHW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyHigh(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mulhi_epu16 (__m256i a, __m256i b); VPMULHUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhi_epu16 (__m256i a, __m256i b);
+ /// VPMULHUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> MultiplyHigh(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mulhrs_epi16 (__m256i a, __m256i b); VPMULHRSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhrs_epi16 (__m256i a, __m256i b);
+ /// VPMULHRSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyHighRoundScale(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mullo_epi16 (__m256i a, __m256i b); VPMULLW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mullo_epi16 (__m256i a, __m256i b);
+ /// VPMULLW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyLow(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_mullo_epi32 (__m256i a, __m256i b); VPMULLD ymm, ymm, ymm/m256
+ /// __m256i _mm256_mullo_epi32 (__m256i a, __m256i b);
+ /// VPMULLD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> MultiplyLow(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Or(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Or(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Or(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Or(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Or(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Or(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Or(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Or(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_packs_epi16 (__m256i a, __m256i b); VPACKSSWB ymm, ymm, ymm/m256
+ /// __m256i _mm256_packs_epi16 (__m256i a, __m256i b);
+ /// VPACKSSWB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> PackSignedSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_packs_epi32 (__m256i a, __m256i b); VPACKSSDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_packs_epi32 (__m256i a, __m256i b);
+ /// VPACKSSDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> PackSignedSaturate(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_packus_epi16 (__m256i a, __m256i b); VPACKUSWB ymm, ymm, ymm/m256
+ /// __m256i _mm256_packus_epi16 (__m256i a, __m256i b);
+ /// VPACKUSWB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> PackUnsignedSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_packus_epi32 (__m256i a, __m256i b); VPACKUSDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_packus_epi32 (__m256i a, __m256i b);
+ /// VPACKUSDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> PackUnsignedSaturate(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<sbyte> Permute2x128(Vector256<sbyte> left, Vector256<sbyte> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<byte> Permute2x128(Vector256<byte> left, Vector256<byte> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> Permute2x128(Vector256<short> left, Vector256<short> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> Permute2x128(Vector256<ushort> left, Vector256<ushort> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Permute2x128(Vector256<int> left, Vector256<int> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Permute2x128(Vector256<uint> left, Vector256<uint> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<long> Permute2x128(Vector256<long> left, Vector256<long> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ulong> Permute2x128(Vector256<ulong> left, Vector256<ulong> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8); VPERMQ ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8);
+ /// VPERMQ ymm, ymm/m256, imm8
/// </summary>
public static Vector256<long> Permute4x64(Vector256<long> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8); VPERMQ ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8);
+ /// VPERMQ ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ulong> Permute4x64(Vector256<ulong> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_permute4x64_pd (__m256d a, const int imm8); VPERMPD ymm, ymm/m256, imm8
+ /// __m256d _mm256_permute4x64_pd (__m256d a, const int imm8);
+ /// VPERMPD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Permute4x64(Vector256<double> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx); VPERMD ymm, ymm/m256, imm8
+ /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx);
+ /// VPERMD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> PermuteVar8x32(Vector256<int> left, Vector256<int> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx); VPERMD ymm, ymm/m256, imm8
+ /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx);
+ /// VPERMD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> PermuteVar8x32(Vector256<uint> left, Vector256<uint> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_permutevar8x32_ps (__m256 a, __m256i idx); VPERMPS ymm, ymm/m256, imm8
+ /// __m256 _mm256_permutevar8x32_ps (__m256 a, __m256i idx);
+ /// VPERMPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> PermuteVar8x32(Vector256<float> left, Vector256<float> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count); VPSLLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count);
+ /// VPSLLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftLeftLogical(Vector256<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count); VPSLLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count);
+ /// VPSLLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ShiftLeftLogical(Vector256<ushort> value, Vector128<ushort> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count); VPSLLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count);
+ /// VPSLLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftLeftLogical(Vector256<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count); VPSLLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count);
+ /// VPSLLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<uint> ShiftLeftLogical(Vector256<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count); VPSLLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count);
+ /// VPSLLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<long> ShiftLeftLogical(Vector256<long> value, Vector128<long> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count); VPSLLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count);
+ /// VPSLLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ShiftLeftLogical(Vector256<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi16 (__m256i a, int imm8); VPSLLW ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi16 (__m256i a, int imm8);
+ /// VPSLLW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftLeftLogical(Vector256<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi16 (__m256i a, int imm8); VPSLLW ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi16 (__m256i a, int imm8);
+ /// VPSLLW ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftLeftLogical(Vector256<ushort> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi32 (__m256i a, int imm8); VPSLLD ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi32 (__m256i a, int imm8);
+ /// VPSLLD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftLeftLogical(Vector256<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi32 (__m256i a, int imm8); VPSLLD ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi32 (__m256i a, int imm8);
+ /// VPSLLD ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftLeftLogical(Vector256<uint> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi64 (__m256i a, int imm8); VPSLLQ ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi64 (__m256i a, int imm8);
+ /// VPSLLQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftLeftLogical(Vector256<long> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_slli_epi64 (__m256i a, int imm8); VPSLLQ ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi64 (__m256i a, int imm8);
+ /// VPSLLQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftLeftLogical(Vector256<ulong> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<sbyte> ShiftLeftLogical128BitLane(Vector256<sbyte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<byte> ShiftLeftLogical128BitLane(Vector256<byte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftLeftLogical128BitLane(Vector256<short> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftLeftLogical128BitLane(Vector256<ushort> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftLeftLogical128BitLane(Vector256<int> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftLeftLogical128BitLane(Vector256<uint> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftLeftLogical128BitLane(Vector256<long> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftLeftLogical128BitLane(Vector256<ulong> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count); VPSLLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count);
+ /// VPSLLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftLeftLogicalVariable(Vector256<int> value, Vector256<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count); VPSLLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count);
+ /// VPSLLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> ShiftLeftLogicalVariable(Vector256<uint> value, Vector256<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count); VPSLLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count);
+ /// VPSLLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> ShiftLeftLogicalVariable(Vector256<long> value, Vector256<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count); VPSLLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count);
+ /// VPSLLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> ShiftLeftLogicalVariable(Vector256<ulong> value, Vector256<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count); VPSLLVD xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count);
+ /// VPSLLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftLeftLogicalVariable(Vector128<int> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count); VPSLLVD xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count);
+ /// VPSLLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftLeftLogicalVariable(Vector128<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count); VPSLLVQ xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count);
+ /// VPSLLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftLeftLogicalVariable(Vector128<long> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count); VPSLLVQ xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count);
+ /// VPSLLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftLeftLogicalVariable(Vector128<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// _mm256_sra_epi16 (__m256i a, __m128i count); VPSRAW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sra_epi16 (__m256i a, __m128i count);
+ /// VPSRAW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftRightArithmetic(Vector256<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// _mm256_sra_epi32 (__m256i a, __m128i count); VPSRAD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sra_epi32 (__m256i a, __m128i count);
+ /// VPSRAD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftRightArithmetic(Vector256<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srai_epi16 (__m256i a, int imm8); VPSRAW ymm, ymm, imm8
+ /// __m256i _mm256_srai_epi16 (__m256i a, int imm8);
+ /// VPSRAW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightArithmetic(Vector256<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srai_epi32 (__m256i a, int imm8); VPSRAD ymm, ymm, imm8
+ /// __m256i _mm256_srai_epi32 (__m256i a, int imm8);
+ /// VPSRAD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightArithmetic(Vector256<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srav_epi32 (__m256i a, __m256i count); VPSRAVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srav_epi32 (__m256i a, __m256i count);
+ /// VPSRAVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftRightArithmeticVariable(Vector256<int> value, Vector256<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srav_epi32 (__m128i a, __m128i count); VPSRAVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srav_epi32 (__m128i a, __m128i count);
+ /// VPSRAVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightArithmeticVariable(Vector128<int> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count); VPSRLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count);
+ /// VPSRLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftRightLogical(Vector256<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count); VPSRLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count);
+ /// VPSRLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ShiftRightLogical(Vector256<ushort> value, Vector128<ushort> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count); VPSRLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count);
+ /// VPSRLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftRightLogical(Vector256<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count); VPSRLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count);
+ /// VPSRLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<uint> ShiftRightLogical(Vector256<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count); VPSRLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count);
+ /// VPSRLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<long> ShiftRightLogical(Vector256<long> value, Vector128<long> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count); VPSRLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count);
+ /// VPSRLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ShiftRightLogical(Vector256<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi16 (__m256i a, int imm8); VPSRLW ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi16 (__m256i a, int imm8);
+ /// VPSRLW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightLogical(Vector256<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi16 (__m256i a, int imm8); VPSRLW ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi16 (__m256i a, int imm8);
+ /// VPSRLW ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftRightLogical(Vector256<ushort> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi32 (__m256i a, int imm8); VPSRLD ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi32 (__m256i a, int imm8);
+ /// VPSRLD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightLogical(Vector256<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi32 (__m256i a, int imm8); VPSRLD ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi32 (__m256i a, int imm8);
+ /// VPSRLD ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftRightLogical(Vector256<uint> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi64 (__m256i a, int imm8); VPSRLQ ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi64 (__m256i a, int imm8);
+ /// VPSRLQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftRightLogical(Vector256<long> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srli_epi64 (__m256i a, int imm8); VPSRLQ ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi64 (__m256i a, int imm8);
+ /// VPSRLQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftRightLogical(Vector256<ulong> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<sbyte> ShiftRightLogical128BitLane(Vector256<sbyte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<byte> ShiftRightLogical128BitLane(Vector256<byte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightLogical128BitLane(Vector256<short> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftRightLogical128BitLane(Vector256<ushort> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightLogical128BitLane(Vector256<int> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftRightLogical128BitLane(Vector256<uint> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftRightLogical128BitLane(Vector256<long> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftRightLogical128BitLane(Vector256<ulong> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count); VPSRLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count);
+ /// VPSRLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftRightLogicalVariable(Vector256<int> value, Vector256<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count); VPSRLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count);
+ /// VPSRLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> ShiftRightLogicalVariable(Vector256<uint> value, Vector256<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count); VPSRLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count);
+ /// VPSRLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> ShiftRightLogicalVariable(Vector256<long> value, Vector256<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count); VPSRLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count);
+ /// VPSRLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> ShiftRightLogicalVariable(Vector256<ulong> value, Vector256<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count); VPSRLVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count);
+ /// VPSRLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightLogicalVariable(Vector128<int> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count); VPSRLVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count);
+ /// VPSRLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftRightLogicalVariable(Vector128<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count); VPSRLVQ xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count);
+ /// VPSRLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftRightLogicalVariable(Vector128<long> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count); VPSRLVQ xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count);
+ /// VPSRLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftRightLogicalVariable(Vector128<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b); VPSHUFB ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b);
+ /// VPSHUFB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Shuffle(Vector256<sbyte> value, Vector256<sbyte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b); VPSHUFB ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b);
+ /// VPSHUFB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Shuffle(Vector256<byte> value, Vector256<byte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8); VPSHUFD ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8);
+ /// VPSHUFD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Shuffle(Vector256<int> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8); VPSHUFD ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8);
+ /// VPSHUFD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Shuffle(Vector256<uint> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8); VPSHUFHW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8);
+ /// VPSHUFHW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> ShuffleHigh(Vector256<short> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8); VPSHUFHW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8);
+ /// VPSHUFHW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> ShuffleHigh(Vector256<ushort> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8); VPSHUFLW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8);
+ /// VPSHUFLW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> ShuffleLow(Vector256<short> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8); VPSHUFLW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8);
+ /// VPSHUFLW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> ShuffleLow(Vector256<ushort> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sign_epi8 (__m256i a, __m256i b); VPSIGNB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi8 (__m256i a, __m256i b);
+ /// VPSIGNB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Sign(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sign_epi16 (__m256i a, __m256i b); VPSIGNW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi16 (__m256i a, __m256i b);
+ /// VPSIGNW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Sign(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sign_epi32 (__m256i a, __m256i b); VPSIGND ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi32 (__m256i a, __m256i b);
+ /// VPSIGND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Sign(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b); VPSUBB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b);
+ /// VPSUBB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Subtract(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b); VPSUBB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b);
+ /// VPSUBB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Subtract(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b); VPSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b);
+ /// VPSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Subtract(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b); VPSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b);
+ /// VPSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Subtract(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b); VPSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b);
+ /// VPSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Subtract(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b); VPSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b);
+ /// VPSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Subtract(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b); VPSUBQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b);
+ /// VPSUBQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Subtract(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b); VPSUBQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b);
+ /// VPSUBQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Subtract(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_subs_epi8 (__m256i a, __m256i b); VPSUBSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epi8 (__m256i a, __m256i b);
+ /// VPSUBSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> SubtractSaturate(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_subs_epi16 (__m256i a, __m256i b); VPSUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epi16 (__m256i a, __m256i b);
+ /// VPSUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> SubtractSaturate(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_subs_epu8 (__m256i a, __m256i b); VPSUBUSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epu8 (__m256i a, __m256i b);
+ /// VPSUBUSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> SubtractSaturate(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_subs_epu16 (__m256i a, __m256i b); VPSUBUSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epu16 (__m256i a, __m256i b);
+ /// VPSUBUSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> SubtractSaturate(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_sad_epu8 (__m256i a, __m256i b); VPSADBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sad_epu8 (__m256i a, __m256i b);
+ /// VPSADBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> SumAbsoluteDifferences(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b); VPUNPCKHBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKHBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> UnpackHigh(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b); VPUNPCKHBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKHBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> UnpackHigh(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b); VPUNPCKHWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKHWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> UnpackHigh(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b); VPUNPCKHWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKHWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> UnpackHigh(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b); VPUNPCKHDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKHDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> UnpackHigh(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b); VPUNPCKHDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKHDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> UnpackHigh(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b); VPUNPCKHQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKHQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> UnpackHigh(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b); VPUNPCKHQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKHQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> UnpackHigh(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b); VPUNPCKLBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKLBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> UnpackLow(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b); VPUNPCKLBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKLBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> UnpackLow(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b); VPUNPCKLWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKLWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> UnpackLow(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b); VPUNPCKLWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKLWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> UnpackLow(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b); VPUNPCKLDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKLDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> UnpackLow(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b); VPUNPCKLDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKLDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> UnpackLow(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b); VPUNPCKLQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKLQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> UnpackLow(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b); VPUNPCKLQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKLQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> UnpackLow(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Xor(Vector256<sbyte> left, Vector256<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Xor(Vector256<byte> left, Vector256<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Xor(Vector256<short> left, Vector256<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Xor(Vector256<ushort> left, Vector256<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Xor(Vector256<int> left, Vector256<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Xor(Vector256<uint> left, Vector256<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Xor(Vector256<long> left, Vector256<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Xor(Vector256<ulong> left, Vector256<ulong> right) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.cs
index 4703588fd0..da4ad28e4c 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Avx2.cs
@@ -16,189 +16,237 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m256i _mm256_abs_epi8 (__m256i a); VPABSB ymm, ymm/m256
+ /// __m256i _mm256_abs_epi8 (__m256i a);
+ /// VPABSB ymm, ymm/m256
/// </summary>
public static Vector256<byte> Abs(Vector256<sbyte> value) => Abs(value);
/// <summary>
- /// __m256i _mm256_abs_epi16 (__m256i a); VPABSW ymm, ymm/m256
+ /// __m256i _mm256_abs_epi16 (__m256i a);
+ /// VPABSW ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Abs(Vector256<short> value) => Abs(value);
/// <summary>
- /// __m256i _mm256_abs_epi32 (__m256i a); VPABSD ymm, ymm/m256
+ /// __m256i _mm256_abs_epi32 (__m256i a);
+ /// VPABSD ymm, ymm/m256
/// </summary>
public static Vector256<uint> Abs(Vector256<int> value) => Abs(value);
/// <summary>
- /// __m256i _mm256_add_epi8 (__m256i a, __m256i b); VPADDB ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi8 (__m256i a, __m256i b);
+ /// VPADDB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Add(Vector256<sbyte> left, Vector256<sbyte> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi8 (__m256i a, __m256i b); VPADDB ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi8 (__m256i a, __m256i b);
+ /// VPADDB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Add(Vector256<byte> left, Vector256<byte> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi16 (__m256i a, __m256i b); VPADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi16 (__m256i a, __m256i b);
+ /// VPADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Add(Vector256<short> left, Vector256<short> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi16 (__m256i a, __m256i b); VPADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi16 (__m256i a, __m256i b);
+ /// VPADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Add(Vector256<ushort> left, Vector256<ushort> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi32 (__m256i a, __m256i b); VPADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi32 (__m256i a, __m256i b);
+ /// VPADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Add(Vector256<int> left, Vector256<int> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi32 (__m256i a, __m256i b); VPADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi32 (__m256i a, __m256i b);
+ /// VPADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Add(Vector256<uint> left, Vector256<uint> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi64 (__m256i a, __m256i b); VPADDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi64 (__m256i a, __m256i b);
+ /// VPADDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Add(Vector256<long> left, Vector256<long> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_add_epi64 (__m256i a, __m256i b); VPADDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_add_epi64 (__m256i a, __m256i b);
+ /// VPADDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Add(Vector256<ulong> left, Vector256<ulong> right) => Add(left, right);
/// <summary>
- /// __m256i _mm256_adds_epi8 (__m256i a, __m256i b); VPADDSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epi8 (__m256i a, __m256i b);
+ /// VPADDSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> AddSaturate(Vector256<sbyte> left, Vector256<sbyte> right) => AddSaturate(left, right);
/// <summary>
- /// __m256i _mm256_adds_epu8 (__m256i a, __m256i b); VPADDUSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epu8 (__m256i a, __m256i b);
+ /// VPADDUSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> AddSaturate(Vector256<byte> left, Vector256<byte> right) => AddSaturate(left, right);
/// <summary>
- /// __m256i _mm256_adds_epi16 (__m256i a, __m256i b); VPADDSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epi16 (__m256i a, __m256i b);
+ /// VPADDSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> AddSaturate(Vector256<short> left, Vector256<short> right) => AddSaturate(left, right);
/// <summary>
- /// __m256i _mm256_adds_epu16 (__m256i a, __m256i b); VPADDUSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_adds_epu16 (__m256i a, __m256i b);
+ /// VPADDUSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> AddSaturate(Vector256<ushort> left, Vector256<ushort> right) => AddSaturate(left, right);
/// <summary>
- /// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count); VPALIGNR ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count);
+ /// VPALIGNR ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<sbyte> AlignRight(Vector256<sbyte> left, Vector256<sbyte> right, byte mask) => AlignRight(left, right, mask);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> And(Vector256<sbyte> left, Vector256<sbyte> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> And(Vector256<byte> left, Vector256<byte> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> And(Vector256<short> left, Vector256<short> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> And(Vector256<ushort> left, Vector256<ushort> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> And(Vector256<int> left, Vector256<int> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> And(Vector256<uint> left, Vector256<uint> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> And(Vector256<long> left, Vector256<long> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_and_si256 (__m256i a, __m256i b); VPAND ymm, ymm, ymm/m256
+ /// __m256i _mm256_and_si256 (__m256i a, __m256i b);
+ /// VPAND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> And(Vector256<ulong> left, Vector256<ulong> right) => And(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> AndNot(Vector256<sbyte> left, Vector256<sbyte> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> AndNot(Vector256<byte> left, Vector256<byte> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> AndNot(Vector256<short> left, Vector256<short> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> AndNot(Vector256<ushort> left, Vector256<ushort> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> AndNot(Vector256<int> left, Vector256<int> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> AndNot(Vector256<uint> left, Vector256<uint> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> AndNot(Vector256<long> left, Vector256<long> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b); VPANDN ymm, ymm, ymm/m256
+ /// __m256i _mm256_andnot_si256 (__m256i a, __m256i b);
+ /// VPANDN ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> AndNot(Vector256<ulong> left, Vector256<ulong> right) => AndNot(left, right);
/// <summary>
- /// __m256i _mm256_avg_epu8 (__m256i a, __m256i b); VPAVGB ymm, ymm, ymm/m256
+ /// __m256i _mm256_avg_epu8 (__m256i a, __m256i b);
+ /// VPAVGB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Average(Vector256<byte> left, Vector256<byte> right) => Average(left, right);
/// <summary>
- /// __m256i _mm256_avg_epu16 (__m256i a, __m256i b); VPAVGW ymm, ymm, ymm/m256
+ /// __m256i _mm256_avg_epu16 (__m256i a, __m256i b);
+ /// VPAVGW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Average(Vector256<ushort> left, Vector256<ushort> right) => Average(left, right);
/// <summary>
- /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8); VPBLENDD xmm, xmm, xmm/m128, imm8
+ /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8);
+ /// VPBLENDD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<int> Blend(Vector128<int> left, Vector128<int> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8); VPBLENDD xmm, xmm, xmm/m128, imm8
+ /// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8);
+ /// VPBLENDD xmm, xmm, xmm/m128, imm8
/// </summary>
public static Vector128<uint> Blend(Vector128<uint> left, Vector128<uint> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8); VPBLENDW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> Blend(Vector256<short> left, Vector256<short> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8); VPBLENDW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> Blend(Vector256<ushort> left, Vector256<ushort> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8); VPBLENDD ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Blend(Vector256<int> left, Vector256<int> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8); VPBLENDD ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8);
+ /// VPBLENDD ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Blend(Vector256<uint> left, Vector256<uint> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask); PBLENDVB ymm, ymm, ymm/m256, ymm
+ /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask);
+ /// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<sbyte> BlendVariable(Vector256<sbyte> left, Vector256<sbyte> right, Vector256<sbyte> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask); PBLENDVB ymm, ymm, ymm/m256, ymm
+ /// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask);
+ /// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// </summary>
public static Vector256<byte> BlendVariable(Vector256<byte> left, Vector256<byte> right, Vector256<byte> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128i _mm_broadcastb_epi8 (__m128i a); VPBROADCASTB xmm, xmm
- /// __m128i _mm_broadcastw_epi16 (__m128i a); VPBROADCASTW xmm, xmm
- /// __m128i _mm_broadcastd_epi32 (__m128i a); VPBROADCASTD xmm, xmm
- /// __m128i _mm_broadcastq_epi64 (__m128i a); VPBROADCASTQ xmm, xmm
- /// __m128 _mm_broadcastss_ps (__m128 a); VBROADCASTSS xmm, xmm
- /// __m128d _mm_broadcastsd_pd (__m128d a); VBROADCASTSD xmm, xmm
+ /// __m128i _mm_broadcastb_epi8 (__m128i a);
+ /// VPBROADCASTB xmm, xmm
+ /// __m128i _mm_broadcastw_epi16 (__m128i a);
+ /// VPBROADCASTW xmm, xmm
+ /// __m128i _mm_broadcastd_epi32 (__m128i a);
+ /// VPBROADCASTD xmm, xmm
+ /// __m128i _mm_broadcastq_epi64 (__m128i a);
+ /// VPBROADCASTQ xmm, xmm
+ /// __m128 _mm_broadcastss_ps (__m128 a);
+ /// VBROADCASTSS xmm, xmm
+ /// __m128d _mm_broadcastsd_pd (__m128d a);
+ /// VBROADCASTSD xmm, xmm
/// </summary>
public static Vector128<T> BroadcastElementToVector128<T>(Vector128<T> value) where T : struct
{
@@ -207,12 +255,18 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256i _mm256_broadcastb_epi8 (__m128i a); VPBROADCASTB ymm, xmm
- /// __m256i _mm256_broadcastw_epi16 (__m128i a); VPBROADCASTW ymm, xmm
- /// __m256i _mm256_broadcastd_epi32 (__m128i a); VPBROADCASTD ymm, xmm
- /// __m256i _mm256_broadcastq_epi64 (__m128i a); VPBROADCASTQ ymm, xmm
- /// __m256 _mm256_broadcastss_ps (__m128 a); VBROADCASTSS ymm, xmm
- /// __m256d _mm256_broadcastsd_pd (__m128d a); VBROADCASTSD ymm, xmm
+ /// __m256i _mm256_broadcastb_epi8 (__m128i a);
+ /// VPBROADCASTB ymm, xmm
+ /// __m256i _mm256_broadcastw_epi16 (__m128i a);
+ /// VPBROADCASTW ymm, xmm
+ /// __m256i _mm256_broadcastd_epi32 (__m128i a);
+ /// VPBROADCASTD ymm, xmm
+ /// __m256i _mm256_broadcastq_epi64 (__m128i a);
+ /// VPBROADCASTQ ymm, xmm
+ /// __m256 _mm256_broadcastss_ps (__m128 a);
+ /// VBROADCASTSS ymm, xmm
+ /// __m256d _mm256_broadcastsd_pd (__m128d a);
+ /// VBROADCASTSD ymm, xmm
/// </summary>
public static Vector256<T> BroadcastElementToVector256<T>(Vector128<T> value) where T : struct
{
@@ -221,139 +275,172 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m8
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<sbyte> BroadcastVector128ToVector256(sbyte* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m8
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<byte> BroadcastVector128ToVector256(byte* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m16
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<short> BroadcastVector128ToVector256(short* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m16
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<ushort> BroadcastVector128ToVector256(ushort* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m32
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<int> BroadcastVector128ToVector256(int* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m32
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<uint> BroadcastVector128ToVector256(uint* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m64
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<long> BroadcastVector128ToVector256(long* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_broadcastsi128_si256 (__m128i a); VBROADCASTI128 xmm, m64
+ /// __m256i _mm256_broadcastsi128_si256 (__m128i a);
+ /// VBROADCASTI128 ymm, m128
/// </summary>
public static unsafe Vector256<ulong> BroadcastVector128ToVector256(ulong* address) => BroadcastVector128ToVector256(address);
/// <summary>
- /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b); VPCMPEQB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b);
+ /// VPCMPEQB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> CompareEqual(Vector256<sbyte> left, Vector256<sbyte> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b); VPCMPEQB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b);
+ /// VPCMPEQB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> CompareEqual(Vector256<byte> left, Vector256<byte> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b); VPCMPEQW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b);
+ /// VPCMPEQW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> CompareEqual(Vector256<short> left, Vector256<short> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b); VPCMPEQW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b);
+ /// VPCMPEQW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> CompareEqual(Vector256<ushort> left, Vector256<ushort> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b); VPCMPEQD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b);
+ /// VPCMPEQD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> CompareEqual(Vector256<int> left, Vector256<int> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b); VPCMPEQD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b);
+ /// VPCMPEQD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> CompareEqual(Vector256<uint> left, Vector256<uint> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b); VPCMPEQQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b);
+ /// VPCMPEQQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> CompareEqual(Vector256<long> left, Vector256<long> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b); VPCMPEQQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b);
+ /// VPCMPEQQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> CompareEqual(Vector256<ulong> left, Vector256<ulong> right) => CompareEqual(left, right);
/// <summary>
- /// __m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b); VPCMPGTB ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b);
+ /// VPCMPGTB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> CompareGreaterThan(Vector256<sbyte> left, Vector256<sbyte> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b); VPCMPGTW ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b);
+ /// VPCMPGTW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> CompareGreaterThan(Vector256<short> left, Vector256<short> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b); VPCMPGTD ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b);
+ /// VPCMPGTD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> CompareGreaterThan(Vector256<int> left, Vector256<int> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b); VPCMPGTQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b);
+ /// VPCMPGTQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> CompareGreaterThan(Vector256<long> left, Vector256<long> right) => CompareGreaterThan(left, right);
/// <summary>
- /// double _mm256_cvtsd_f64 (__m256d a); HELPER: MOVSD
+ /// double _mm256_cvtsd_f64 (__m256d a);
+ /// HELPER: MOVSD
/// </summary>
public static double ConvertToDouble(Vector256<double> value) => ConvertToDouble(value);
/// <summary>
- /// int _mm256_cvtsi256_si32 (__m256i a); MOVD reg/m32, xmm
+ /// int _mm256_cvtsi256_si32 (__m256i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static int ConvertToInt32(Vector256<int> value) => ConvertToInt32(value);
/// <summary>
- /// int _mm256_cvtsi256_si32 (__m256i a); MOVD reg/m32, xmm
+ /// int _mm256_cvtsi256_si32 (__m256i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static uint ConvertToUInt32(Vector256<uint> value) => ConvertToUInt32(value);
/// <summary>
- /// __m256i _mm256_cvtepi8_epi16 (__m128i a); VPMOVSXBW ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi16 (__m128i a);
+ /// VPMOVSXBW ymm, xmm/m128
/// </summary>
public static Vector256<short> ConvertToVector256Int16(Vector128<sbyte> value) => ConvertToVector256Int16(value);
/// <summary>
- /// __m256i _mm256_cvtepu8_epi16 (__m128i a); VPMOVZXBW ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi16 (__m128i a);
+ /// VPMOVZXBW ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ConvertToVector256UInt16(Vector128<byte> value) => ConvertToVector256UInt16(value);
/// <summary>
- /// __m256i _mm256_cvtepi8_epi32 (__m128i a); VPMOVSXBD ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi32 (__m128i a);
+ /// VPMOVSXBD ymm, xmm/m128
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector128<sbyte> value) => ConvertToVector256Int32(value);
/// <summary>
- /// __m256i _mm256_cvtepi16_epi32 (__m128i a); VPMOVSXWD ymm, xmm/m128
+ /// __m256i _mm256_cvtepi16_epi32 (__m128i a);
+ /// VPMOVSXWD ymm, xmm/m128
/// </summary>
public static Vector256<int> ConvertToVector256Int32(Vector128<short> value) => ConvertToVector256Int32(value);
/// <summary>
- /// __m256i _mm256_cvtepu8_epi32 (__m128i a); VPMOVZXBD ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi32 (__m128i a);
+ /// VPMOVZXBD ymm, xmm/m128
/// </summary>
public static Vector256<uint> ConvertToVector256UInt32(Vector128<byte> value) => ConvertToVector256UInt32(value);
/// <summary>
- /// __m256i _mm256_cvtepu16_epi32 (__m128i a); VPMOVZXWD ymm, xmm/m128
+ /// __m256i _mm256_cvtepu16_epi32 (__m128i a);
+ /// VPMOVZXWD ymm, xmm/m128
/// </summary>
public static Vector256<uint> ConvertToVector256UInt32(Vector128<ushort> value) => ConvertToVector256UInt32(value);
/// <summary>
- /// __m256i _mm256_cvtepi8_epi64 (__m128i a); VPMOVSXBQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi8_epi64 (__m128i a);
+ /// VPMOVSXBQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<sbyte> value) => ConvertToVector256Int64(value);
/// <summary>
- /// __m256i _mm256_cvtepi16_epi64 (__m128i a); VPMOVSXWQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi16_epi64 (__m128i a);
+ /// VPMOVSXWQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<short> value) => ConvertToVector256Int64(value);
/// <summary>
- /// __m256i _mm256_cvtepi32_epi64 (__m128i a); VPMOVSXDQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepi32_epi64 (__m128i a);
+ /// VPMOVSXDQ ymm, xmm/m128
/// </summary>
public static Vector256<long> ConvertToVector256Int64(Vector128<int> value) => ConvertToVector256Int64(value);
/// <summary>
- /// __m256i _mm256_cvtepu8_epi64 (__m128i a); VPMOVZXBQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepu8_epi64 (__m128i a);
+ /// VPMOVZXBQ ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<byte> value) => ConvertToVector256UInt64(value);
/// <summary>
@@ -361,1153 +448,1424 @@ namespace System.Runtime.Intrinsics.X86
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<ushort> value) => ConvertToVector256UInt64(value);
/// <summary>
- /// __m256i _mm256_cvtepu32_epi64 (__m128i a); VPMOVZXDQ ymm, xmm/m128
+ /// __m256i _mm256_cvtepu32_epi64 (__m128i a);
+ /// VPMOVZXDQ ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ConvertToVector256UInt64(Vector128<uint> value) => ConvertToVector256UInt64(value);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<sbyte> ExtractVector128(Vector256<sbyte> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(sbyte* address, Vector256<sbyte> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<byte> ExtractVector128(Vector256<byte> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(byte* address, Vector256<byte> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<short> ExtractVector128(Vector256<short> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(short* address, Vector256<short> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<ushort> ExtractVector128(Vector256<ushort> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ushort* address, Vector256<ushort> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<int> ExtractVector128(Vector256<int> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(int* address, Vector256<int> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<uint> ExtractVector128(Vector256<uint> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(uint* address, Vector256<uint> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<long> ExtractVector128(Vector256<long> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(long* address, Vector256<long> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 xmm, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 xmm, ymm, imm8
/// </summary>
public static Vector128<ulong> ExtractVector128(Vector256<ulong> value, byte index) => ExtractVector128(value, index);
/// <summary>
- /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8); VEXTRACTI128 m128, ymm, imm8
+ /// __m128i _mm256_extracti128_si256 (__m256i a, const int imm8);
+ /// VEXTRACTI128 m128, ymm, imm8
/// </summary>
public static unsafe void ExtractVector128(ulong* address, Vector256<ulong> value, byte index) => ExtractVector128(address, value, index);
/// <summary>
- /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<long> GatherVector128(long* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherVector128(ulong* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128 _mm_i32gather_ps (float const* base_addr, __m128i vindex, const int scale); VGATHERDPS xmm, vm32x, xmm
+ /// __m128 _mm_i32gather_ps (float const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPS xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128d _mm_i32gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERDPD xmm, vm32x, xmm
+ /// __m128d _mm_i32gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<double> GatherVector128(double* baseAddress, Vector128<int> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi32 (int const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<long> GatherVector128(long* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_i64gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherVector128(ulong* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128 _mm_i64gather_ps (float const* base_addr, __m128i vindex, const int scale); VGATHERQPS xmm, vm64x, xmm
+ /// __m128 _mm_i64gather_ps (float const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERQPS xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128d _mm_i64gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERQPD xmm, vm64x, xmm
+ /// __m128d _mm_i64gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERQPD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<double> GatherVector128(double* baseAddress, Vector128<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<int> GatherVector256(int* baseAddress, Vector256<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<uint> GatherVector256(uint* baseAddress, Vector256<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<long> GatherVector256(long* baseAddress, Vector128<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_i32gather_epi64 (__int64 const* base_addr, __m128i vindex, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherVector256(ulong* baseAddress, Vector128<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256 _mm256_i32gather_ps (float const* base_addr, __m256i vindex, const int scale); VGATHERDPS ymm, vm32y, ymm
+ /// __m256 _mm256_i32gather_ps (float const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERDPS ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<float> GatherVector256(float* baseAddress, Vector256<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256d _mm256_i32gather_pd (double const* base_addr, __m128i vindex, const int scale); VGATHERDPD ymm, vm32y, ymm
+ /// __m256d _mm256_i32gather_pd (double const* base_addr, __m128i vindex, const int scale);
+ /// VGATHERDPD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<double> GatherVector256(double* baseAddress, Vector128<int> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERQD ymm, vm64y, ymm
+ /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<int> GatherVector128(int* baseAddress, Vector256<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale); VPGATHERQD ymm, vm64y, ymm
+ /// __m128i _mm256_i64gather_epi32 (int const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<uint> GatherVector128(uint* baseAddress, Vector256<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale); VPGATHERQQ ymm, vm64y, ymm
+ /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<long> GatherVector256(long* baseAddress, Vector256<long> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale); VPGATHERQQ ymm, vm64y, ymm
+ /// __m256i _mm256_i64gather_epi64 (__int64 const* base_addr, __m256i vindex, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherVector256(ulong* baseAddress, Vector256<long> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m128 _mm256_i64gather_ps (float const* base_addr, __m256i vindex, const int scale); VGATHERQPS ymm, vm64y, ymm
+ /// __m128 _mm256_i64gather_ps (float const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERQPS xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<float> GatherVector128(float* baseAddress, Vector256<long> index, byte scale) => GatherVector128(baseAddress, index, scale);
/// <summary>
- /// __m256d _mm256_i64gather_pd (double const* base_addr, __m256i vindex, const int scale); VGATHERQPD ymm, vm64y, ymm
+ /// __m256d _mm256_i64gather_pd (double const* base_addr, __m256i vindex, const int scale);
+ /// VGATHERQPD ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<double> GatherVector256(double* baseAddress, Vector256<long> index, byte scale) => GatherVector256(baseAddress, index, scale);
/// <summary>
- /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector128<int> index, Vector128<int> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDD xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector128<int> index, Vector128<uint> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<long> GatherMaskVector128(Vector128<long> source, long* baseAddress, Vector128<int> index, Vector128<long> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERDQ xmm, vm32x, xmm
+ /// __m128i _mm_mask_i32gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERDQ xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherMaskVector128(Vector128<ulong> source, ulong* baseAddress, Vector128<int> index, Vector128<ulong> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128 _mm_mask_i32gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale); VGATHERDPS xmm, vm32x, xmm
+ /// __m128 _mm_mask_i32gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale);
+ /// VGATHERDPS xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector128<int> index, Vector128<float> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128d _mm_mask_i32gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale); VGATHERDPD xmm, vm32x, xmm
+ /// __m128d _mm_mask_i32gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale);
+ /// VGATHERDPD xmm, vm32x, xmm
/// </summary>
public static unsafe Vector128<double> GatherMaskVector128(Vector128<double> source, double* baseAddress, Vector128<int> index, Vector128<double> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector128<long> index, Vector128<int> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQD xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector128<long> index, Vector128<uint> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<long> GatherMaskVector128(Vector128<long> source, long* baseAddress, Vector128<long> index, Vector128<long> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale); VPGATHERQQ xmm, vm64x, xmm
+ /// __m128i _mm_mask_i64gather_epi64 (__m128i src, __int64 const* base_addr, __m128i vindex, __m128i mask, const int scale);
+ /// VPGATHERQQ xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<ulong> GatherMaskVector128(Vector128<ulong> source, ulong* baseAddress, Vector128<long> index, Vector128<ulong> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128 _mm_mask_i64gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale); VPGATHERQPS xmm, vm64x, xmm
+ /// __m128 _mm_mask_i64gather_ps (__m128 src, float const* base_addr, __m128i vindex, __m128 mask, const int scale);
+ /// VGATHERQPS xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector128<long> index, Vector128<float> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128d _mm_mask_i64gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale); VPGATHERQPD xmm, vm64x, xmm
+ /// __m128d _mm_mask_i64gather_pd (__m128d src, double const* base_addr, __m128i vindex, __m128d mask, const int scale);
+ /// VGATHERQPD xmm, vm64x, xmm
/// </summary>
public static unsafe Vector128<double> GatherMaskVector128(Vector128<double> source, double* baseAddress, Vector128<long> index, Vector128<double> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<int> GatherMaskVector256(Vector256<int> source, int* baseAddress, Vector256<int> index, Vector256<int> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERDD ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERDD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<uint> GatherMaskVector256(Vector256<uint> source, uint* baseAddress, Vector256<int> index, Vector256<uint> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<long> GatherMaskVector256(Vector256<long> source, long* baseAddress, Vector128<int> index, Vector256<long> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale); VPGATHERDQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i32gather_epi64 (__m256i src, __int64 const* base_addr, __m128i vindex, __m256i mask, const int scale);
+ /// VPGATHERDQ ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherMaskVector256(Vector256<ulong> source, ulong* baseAddress, Vector128<int> index, Vector256<ulong> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256 _mm256_mask_i32gather_ps (__m256 src, float const* base_addr, __m256i vindex, __m256 mask, const int scale); VPGATHERDPS ymm, vm32y, ymm
+ /// __m256 _mm256_mask_i32gather_ps (__m256 src, float const* base_addr, __m256i vindex, __m256 mask, const int scale);
+ /// VGATHERDPS ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<float> GatherMaskVector256(Vector256<float> source, float* baseAddress, Vector256<int> index, Vector256<float> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256d _mm256_mask_i32gather_pd (__m256d src, double const* base_addr, __m128i vindex, __m256d mask, const int scale); VPGATHERDPD ymm, vm32y, ymm
+ /// __m256d _mm256_mask_i32gather_pd (__m256d src, double const* base_addr, __m128i vindex, __m256d mask, const int scale);
+ /// VGATHERDPD ymm, vm32y, ymm
/// </summary>
public static unsafe Vector256<double> GatherMaskVector256(Vector256<double> source, double* baseAddress, Vector128<int> index, Vector256<double> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale); VPGATHERQD ymm, vm32y, ymm
+ /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<int> GatherMaskVector128(Vector128<int> source, int* baseAddress, Vector256<long> index, Vector128<int> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale); VPGATHERQD ymm, vm32y, ymm
+ /// __m128i _mm256_mask_i64gather_epi32 (__m128i src, int const* base_addr, __m256i vindex, __m128i mask, const int scale);
+ /// VPGATHERQD xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<uint> GatherMaskVector128(Vector128<uint> source, uint* baseAddress, Vector256<long> index, Vector128<uint> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERQQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<long> GatherMaskVector256(Vector256<long> source, long* baseAddress, Vector256<long> index, Vector256<long> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale); VPGATHERQQ ymm, vm32y, ymm
+ /// __m256i _mm256_mask_i64gather_epi64 (__m256i src, __int64 const* base_addr, __m256i vindex, __m256i mask, const int scale);
+ /// VPGATHERQQ ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<ulong> GatherMaskVector256(Vector256<ulong> source, ulong* baseAddress, Vector256<long> index, Vector256<ulong> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m128 _mm256_mask_i64gather_ps (__m128 src, float const* base_addr, __m256i vindex, __m128 mask, const int scale); VPGATHERQPS ymm, vm32y, ymm
+ /// __m128 _mm256_mask_i64gather_ps (__m128 src, float const* base_addr, __m256i vindex, __m128 mask, const int scale);
+ /// VGATHERQPS xmm, vm64y, xmm
/// </summary>
public static unsafe Vector128<float> GatherMaskVector128(Vector128<float> source, float* baseAddress, Vector256<long> index, Vector128<float> mask, byte scale) => GatherMaskVector128(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256d _mm256_mask_i64gather_pd (__m256d src, double const* base_addr, __m256i vindex, __m256d mask, const int scale); VPGATHERQPD ymm, vm32y, ymm
+ /// __m256d _mm256_mask_i64gather_pd (__m256d src, double const* base_addr, __m256i vindex, __m256d mask, const int scale);
+ /// VGATHERQPD ymm, vm64y, ymm
/// </summary>
public static unsafe Vector256<double> GatherMaskVector256(Vector256<double> source, double* baseAddress, Vector256<long> index, Vector256<double> mask, byte scale) => GatherMaskVector256(source, baseAddress, index, mask, scale);
/// <summary>
- /// __m256i _mm256_hadd_epi16 (__m256i a, __m256i b); VPHADDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadd_epi16 (__m256i a, __m256i b);
+ /// VPHADDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalAdd(Vector256<short> left, Vector256<short> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m256i _mm256_hadd_epi32 (__m256i a, __m256i b); VPHADDD ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadd_epi32 (__m256i a, __m256i b);
+ /// VPHADDD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> HorizontalAdd(Vector256<int> left, Vector256<int> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m256i _mm256_hadds_epi16 (__m256i a, __m256i b); VPHADDSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hadds_epi16 (__m256i a, __m256i b);
+ /// VPHADDSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalAddSaturate(Vector256<short> left, Vector256<short> right) => HorizontalAddSaturate(left, right);
/// <summary>
- /// __m256i _mm256_hsub_epi16 (__m256i a, __m256i b); VPHSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsub_epi16 (__m256i a, __m256i b);
+ /// VPHSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalSubtract(Vector256<short> left, Vector256<short> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m256i _mm256_hsub_epi32 (__m256i a, __m256i b); VPHSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsub_epi32 (__m256i a, __m256i b);
+ /// VPHSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> HorizontalSubtract(Vector256<int> left, Vector256<int> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m256i _mm256_hsubs_epi16 (__m256i a, __m256i b); VPHSUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_hsubs_epi16 (__m256i a, __m256i b);
+ /// VPHSUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> HorizontalSubtractSaturate(Vector256<short> left, Vector256<short> right) => HorizontalSubtractSaturate(left, right);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<sbyte> Insert(Vector256<sbyte> value, Vector128<sbyte> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xm128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<sbyte> Insert(Vector256<sbyte> value, sbyte* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<byte> Insert(Vector256<byte> value, Vector128<byte> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<byte> Insert(Vector256<byte> value, byte* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<short> Insert(Vector256<short> value, Vector128<short> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<short> Insert(Vector256<short> value, short* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<ushort> Insert(Vector256<ushort> value, Vector128<ushort> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ushort> Insert(Vector256<ushort> value, ushort* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<int> Insert(Vector256<int> value, Vector128<int> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<int> Insert(Vector256<int> value, int* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<uint> Insert(Vector256<uint> value, Vector128<uint> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<uint> Insert(Vector256<uint> value, uint* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<long> Insert(Vector256<long> value, Vector128<long> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<long> Insert(Vector256<long> value, long* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, xmm, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, xmm, imm8
/// </summary>
public static Vector256<ulong> Insert(Vector256<ulong> value, Vector128<ulong> data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8); VINSERTI128 ymm, ymm, m128, imm8
+ /// __m256i _mm256_inserti128_si256 (__m256i a, __m128i b, const int imm8);
+ /// VINSERTI128 ymm, ymm, m128, imm8
/// </summary>
public static unsafe Vector256<ulong> Insert(Vector256<ulong> value, ulong* address, byte index) => Insert(value, address, index);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<sbyte> LoadAlignedVector256NonTemporal(sbyte* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<byte> LoadAlignedVector256NonTemporal(byte* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<short> LoadAlignedVector256NonTemporal(short* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<ushort> LoadAlignedVector256NonTemporal(ushort* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<int> LoadAlignedVector256NonTemporal(int* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<uint> LoadAlignedVector256NonTemporal(uint* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<long> LoadAlignedVector256NonTemporal(long* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr); VMOVNTDQA ymm, m256
+ /// __m256i _mm256_stream_load_si256 (__m256i const* mem_addr);
+ /// VMOVNTDQA ymm, m256
/// </summary>
public static unsafe Vector256<ulong> LoadAlignedVector256NonTemporal(ulong* address) => LoadAlignedVector256NonTemporal(address);
/// <summary>
- /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask); VPMASKMOVD xmm, xmm, m128
+ /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask);
+ /// VPMASKMOVD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<int> MaskLoad(int* address, Vector128<int> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask); VPMASKMOVD xmm, xmm, m128
+ /// __m128i _mm_maskload_epi32 (int const* mem_addr, __m128i mask);
+ /// VPMASKMOVD xmm, xmm, m128
/// </summary>
public static unsafe Vector128<uint> MaskLoad(uint* address, Vector128<uint> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask); VPMASKMOVQ xmm, xmm, m128
+ /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask);
+ /// VPMASKMOVQ xmm, xmm, m128
/// </summary>
public static unsafe Vector128<long> MaskLoad(long* address, Vector128<long> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask); VPMASKMOVQ xmm, xmm, m128
+ /// __m128i _mm_maskload_epi64 (__int64 const* mem_addr, __m128i mask);
+ /// VPMASKMOVQ xmm, xmm, m128
/// </summary>
public static unsafe Vector128<ulong> MaskLoad(ulong* address, Vector128<ulong> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask); VPMASKMOVD ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask);
+ /// VPMASKMOVD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<int> MaskLoad(int* address, Vector256<int> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask); VPMASKMOVD ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi32 (int const* mem_addr, __m256i mask);
+ /// VPMASKMOVD ymm, ymm, m256
/// </summary>
public static unsafe Vector256<uint> MaskLoad(uint* address, Vector256<uint> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask); VPMASKMOVQ ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask);
+ /// VPMASKMOVQ ymm, ymm, m256
/// </summary>
public static unsafe Vector256<long> MaskLoad(long* address, Vector256<long> mask) => MaskLoad(address, mask);
/// <summary>
- /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask); VPMASKMOVQ ymm, ymm, m256
+ /// __m256i _mm256_maskload_epi64 (__int64 const* mem_addr, __m256i mask);
+ /// VPMASKMOVQ ymm, ymm, m256
/// </summary>
public static unsafe Vector256<ulong> MaskLoad(ulong* address, Vector256<ulong> mask) => MaskLoad(address, mask);
/// <summary>
- /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a); VPMASKMOVD m128, xmm, xmm
+ /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(int* address, Vector128<int> mask, Vector128<int> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a); VPMASKMOVD m128, xmm, xmm
+ /// void _mm_maskstore_epi32 (int* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVD m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(uint* address, Vector128<uint> mask, Vector128<uint> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a); VPMASKMOVQ m128, xmm, xmm
+ /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVQ m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(long* address, Vector128<long> mask, Vector128<long> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a); VPMASKMOVQ m128, xmm, xmm
+ /// void _mm_maskstore_epi64 (__int64* mem_addr, __m128i mask, __m128i a);
+ /// VPMASKMOVQ m128, xmm, xmm
/// </summary>
public static unsafe void MaskStore(ulong* address, Vector128<ulong> mask, Vector128<ulong> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a); VPMASKMOVD m256, ymm, ymm
+ /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(int* address, Vector256<int> mask, Vector256<int> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a); VPMASKMOVD m256, ymm, ymm
+ /// void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVD m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(uint* address, Vector256<uint> mask, Vector256<uint> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a); VPMASKMOVQ m256, ymm, ymm
+ /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVQ m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(long* address, Vector256<long> mask, Vector256<long> source) => MaskStore(address, mask, source);
/// <summary>
- /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a); VPMASKMOVQ m256, ymm, ymm
+ /// void _mm256_maskstore_epi64 (__int64* mem_addr, __m256i mask, __m256i a);
+ /// VPMASKMOVQ m256, ymm, ymm
/// </summary>
public static unsafe void MaskStore(ulong* address, Vector256<ulong> mask, Vector256<ulong> source) => MaskStore(address, mask, source);
/// <summary>
- /// __m256i _mm256_madd_epi16 (__m256i a, __m256i b); VPMADDWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_madd_epi16 (__m256i a, __m256i b);
+ /// VPMADDWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> MultiplyAddAdjacent(Vector256<short> left, Vector256<short> right) => MultiplyAddAdjacent(left, right);
/// <summary>
- /// __m256i _mm256_maddubs_epi16 (__m256i a, __m256i b); VPMADDUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_maddubs_epi16 (__m256i a, __m256i b);
+ /// VPMADDUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyAddAdjacent(Vector256<byte> left, Vector256<sbyte> right) => MultiplyAddAdjacent(left, right);
/// <summary>
- /// __m256i _mm256_max_epi8 (__m256i a, __m256i b); VPMAXSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi8 (__m256i a, __m256i b);
+ /// VPMAXSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Max(Vector256<sbyte> left, Vector256<sbyte> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_max_epu8 (__m256i a, __m256i b); VPMAXUB ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu8 (__m256i a, __m256i b);
+ /// VPMAXUB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Max(Vector256<byte> left, Vector256<byte> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_max_epi16 (__m256i a, __m256i b); VPMAXSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi16 (__m256i a, __m256i b);
+ /// VPMAXSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Max(Vector256<short> left, Vector256<short> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_max_epu16 (__m256i a, __m256i b); VPMAXUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu16 (__m256i a, __m256i b);
+ /// VPMAXUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Max(Vector256<ushort> left, Vector256<ushort> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_max_epi32 (__m256i a, __m256i b); VPMAXSD ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epi32 (__m256i a, __m256i b);
+ /// VPMAXSD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Max(Vector256<int> left, Vector256<int> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_max_epu32 (__m256i a, __m256i b); VPMAXUD ymm, ymm, ymm/m256
+ /// __m256i _mm256_max_epu32 (__m256i a, __m256i b);
+ /// VPMAXUD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Max(Vector256<uint> left, Vector256<uint> right) => Max(left, right);
/// <summary>
- /// __m256i _mm256_min_epi8 (__m256i a, __m256i b); VPMINSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi8 (__m256i a, __m256i b);
+ /// VPMINSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Min(Vector256<sbyte> left, Vector256<sbyte> right) => Min(left, right);
/// <summary>
- /// __m256i _mm256_min_epu8 (__m256i a, __m256i b); VPMINUB ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu8 (__m256i a, __m256i b);
+ /// VPMINUB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Min(Vector256<byte> left, Vector256<byte> right) => Min(left, right);
/// <summary>
- /// __m256i _mm256_min_epi16 (__m256i a, __m256i b); VPMINSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi16 (__m256i a, __m256i b);
+ /// VPMINSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Min(Vector256<short> left, Vector256<short> right) => Min(left, right);
/// <summary>
- /// __m256i _mm256_min_epu16 (__m256i a, __m256i b); VPMINUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu16 (__m256i a, __m256i b);
+ /// VPMINUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Min(Vector256<ushort> left, Vector256<ushort> right) => Min(left, right);
/// <summary>
- /// __m256i _mm256_min_epi32 (__m256i a, __m256i b); VPMINSD ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epi32 (__m256i a, __m256i b);
+ /// VPMINSD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Min(Vector256<int> left, Vector256<int> right) => Min(left, right);
/// <summary>
- /// __m256i _mm256_min_epu32 (__m256i a, __m256i b); VPMINUD ymm, ymm, ymm/m256
+ /// __m256i _mm256_min_epu32 (__m256i a, __m256i b);
+ /// VPMINUD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Min(Vector256<uint> left, Vector256<uint> right) => Min(left, right);
/// <summary>
- /// int _mm256_movemask_epi8 (__m256i a); VPMOVMSKB reg, ymm
+ /// int _mm256_movemask_epi8 (__m256i a);
+ /// VPMOVMSKB reg, ymm
/// </summary>
public static int MoveMask(Vector256<sbyte> value) => MoveMask(value);
/// <summary>
- /// int _mm256_movemask_epi8 (__m256i a); VPMOVMSKB reg, ymm
+ /// int _mm256_movemask_epi8 (__m256i a);
+ /// VPMOVMSKB reg, ymm
/// </summary>
public static int MoveMask(Vector256<byte> value) => MoveMask(value);
/// <summary>
- /// __m256i _mm256_mpsadbw_epu8 (__m256i a, __m256i b, const int imm8); VMPSADBW ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_mpsadbw_epu8 (__m256i a, __m256i b, const int imm8);
+ /// VMPSADBW ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> MultipleSumAbsoluteDifferences(Vector256<byte> left, Vector256<byte> right, byte mask) => MultipleSumAbsoluteDifferences(left, right, mask);
/// <summary>
- /// __m256i _mm256_mul_epi32 (__m256i a, __m256i b); VPMULDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_mul_epi32 (__m256i a, __m256i b);
+ /// VPMULDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Multiply(Vector256<int> left, Vector256<int> right) => Multiply(left, right);
/// <summary>
- /// __m256i _mm256_mul_epu32 (__m256i a, __m256i b); VPMULUDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_mul_epu32 (__m256i a, __m256i b);
+ /// VPMULUDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Multiply(Vector256<uint> left, Vector256<uint> right) => Multiply(left, right);
/// <summary>
- /// __m256i _mm256_mulhi_epi16 (__m256i a, __m256i b); VPMULHW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhi_epi16 (__m256i a, __m256i b);
+ /// VPMULHW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyHigh(Vector256<short> left, Vector256<short> right) => MultiplyHigh(left, right);
/// <summary>
- /// __m256i _mm256_mulhi_epu16 (__m256i a, __m256i b); VPMULHUW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhi_epu16 (__m256i a, __m256i b);
+ /// VPMULHUW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> MultiplyHigh(Vector256<ushort> left, Vector256<ushort> right) => MultiplyHigh(left, right);
/// <summary>
- /// __m256i _mm256_mulhrs_epi16 (__m256i a, __m256i b); VPMULHRSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mulhrs_epi16 (__m256i a, __m256i b);
+ /// VPMULHRSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyHighRoundScale(Vector256<short> left, Vector256<short> right) => MultiplyHighRoundScale(left, right);
/// <summary>
- /// __m256i _mm256_mullo_epi16 (__m256i a, __m256i b); VPMULLW ymm, ymm, ymm/m256
+ /// __m256i _mm256_mullo_epi16 (__m256i a, __m256i b);
+ /// VPMULLW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> MultiplyLow(Vector256<short> left, Vector256<short> right) => MultiplyLow(left, right);
/// <summary>
- /// __m256i _mm256_mullo_epi32 (__m256i a, __m256i b); VPMULLD ymm, ymm, ymm/m256
+ /// __m256i _mm256_mullo_epi32 (__m256i a, __m256i b);
+ /// VPMULLD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> MultiplyLow(Vector256<int> left, Vector256<int> right) => MultiplyLow(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Or(Vector256<sbyte> left, Vector256<sbyte> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Or(Vector256<byte> left, Vector256<byte> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Or(Vector256<short> left, Vector256<short> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Or(Vector256<ushort> left, Vector256<ushort> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Or(Vector256<int> left, Vector256<int> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Or(Vector256<uint> left, Vector256<uint> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Or(Vector256<long> left, Vector256<long> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_or_si256 (__m256i a, __m256i b); VPOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_or_si256 (__m256i a, __m256i b);
+ /// VPOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Or(Vector256<ulong> left, Vector256<ulong> right) => Or(left, right);
/// <summary>
- /// __m256i _mm256_packs_epi16 (__m256i a, __m256i b); VPACKSSWB ymm, ymm, ymm/m256
+ /// __m256i _mm256_packs_epi16 (__m256i a, __m256i b);
+ /// VPACKSSWB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> PackSignedSaturate(Vector256<short> left, Vector256<short> right) => PackSignedSaturate(left, right);
/// <summary>
- /// __m256i _mm256_packs_epi32 (__m256i a, __m256i b); VPACKSSDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_packs_epi32 (__m256i a, __m256i b);
+ /// VPACKSSDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> PackSignedSaturate(Vector256<int> left, Vector256<int> right) => PackSignedSaturate(left, right);
/// <summary>
- /// __m256i _mm256_packus_epi16 (__m256i a, __m256i b); VPACKUSWB ymm, ymm, ymm/m256
+ /// __m256i _mm256_packus_epi16 (__m256i a, __m256i b);
+ /// VPACKUSWB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> PackUnsignedSaturate(Vector256<short> left, Vector256<short> right) => PackUnsignedSaturate(left, right);
/// <summary>
- /// __m256i _mm256_packus_epi32 (__m256i a, __m256i b); VPACKUSDW ymm, ymm, ymm/m256
+ /// __m256i _mm256_packus_epi32 (__m256i a, __m256i b);
+ /// VPACKUSDW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> PackUnsignedSaturate(Vector256<int> left, Vector256<int> right) => PackUnsignedSaturate(left, right);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<sbyte> Permute2x128(Vector256<sbyte> left, Vector256<sbyte> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<byte> Permute2x128(Vector256<byte> left, Vector256<byte> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> Permute2x128(Vector256<short> left, Vector256<short> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> Permute2x128(Vector256<ushort> left, Vector256<ushort> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Permute2x128(Vector256<int> left, Vector256<int> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Permute2x128(Vector256<uint> left, Vector256<uint> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<long> Permute2x128(Vector256<long> left, Vector256<long> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8); VPERM2I128 ymm, ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute2x128_si256 (__m256i a, __m256i b, const int imm8);
+ /// VPERM2I128 ymm, ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ulong> Permute2x128(Vector256<ulong> left, Vector256<ulong> right, byte control) => Permute2x128(left, right, control);
/// <summary>
- /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8); VPERMQ ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8);
+ /// VPERMQ ymm, ymm/m256, imm8
/// </summary>
public static Vector256<long> Permute4x64(Vector256<long> value, byte control) => Permute4x64(value, control);
/// <summary>
- /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8); VPERMQ ymm, ymm/m256, imm8
+ /// __m256i _mm256_permute4x64_epi64 (__m256i a, const int imm8);
+ /// VPERMQ ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ulong> Permute4x64(Vector256<ulong> value, byte control) => Permute4x64(value, control);
/// <summary>
- /// __m256d _mm256_permute4x64_pd (__m256d a, const int imm8); VPERMPD ymm, ymm/m256, imm8
+ /// __m256d _mm256_permute4x64_pd (__m256d a, const int imm8);
+ /// VPERMPD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<double> Permute4x64(Vector256<double> value, byte control) => Permute4x64(value, control);
/// <summary>
- /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx); VPERMD ymm, ymm/m256, imm8
+ /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx);
+ /// VPERMD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> PermuteVar8x32(Vector256<int> left, Vector256<int> mask) => PermuteVar8x32(left, mask);
/// <summary>
- /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx); VPERMD ymm, ymm/m256, imm8
+ /// __m256i _mm256_permutevar8x32_epi32 (__m256i a, __m256i idx);
+ /// VPERMD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> PermuteVar8x32(Vector256<uint> left, Vector256<uint> mask) => PermuteVar8x32(left, mask);
/// <summary>
- /// __m256 _mm256_permutevar8x32_ps (__m256 a, __m256i idx); VPERMPS ymm, ymm/m256, imm8
+ /// __m256 _mm256_permutevar8x32_ps (__m256 a, __m256i idx);
+ /// VPERMPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> PermuteVar8x32(Vector256<float> left, Vector256<float> mask) => PermuteVar8x32(left, mask);
/// <summary>
- /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count); VPSLLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count);
+ /// VPSLLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftLeftLogical(Vector256<short> value, Vector128<short> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count); VPSLLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi16 (__m256i a, __m128i count);
+ /// VPSLLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ShiftLeftLogical(Vector256<ushort> value, Vector128<ushort> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count); VPSLLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count);
+ /// VPSLLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftLeftLogical(Vector256<int> value, Vector128<int> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count); VPSLLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi32 (__m256i a, __m128i count);
+ /// VPSLLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<uint> ShiftLeftLogical(Vector256<uint> value, Vector128<uint> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count); VPSLLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count);
+ /// VPSLLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<long> ShiftLeftLogical(Vector256<long> value, Vector128<long> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count); VPSLLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_sll_epi64 (__m256i a, __m128i count);
+ /// VPSLLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ShiftLeftLogical(Vector256<ulong> value, Vector128<ulong> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi16 (__m256i a, int imm8); VPSLLW ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi16 (__m256i a, int imm8);
+ /// VPSLLW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftLeftLogical(Vector256<short> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi16 (__m256i a, int imm8); VPSLLW ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi16 (__m256i a, int imm8);
+ /// VPSLLW ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftLeftLogical(Vector256<ushort> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi32 (__m256i a, int imm8); VPSLLD ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi32 (__m256i a, int imm8);
+ /// VPSLLD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftLeftLogical(Vector256<int> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi32 (__m256i a, int imm8); VPSLLD ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi32 (__m256i a, int imm8);
+ /// VPSLLD ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftLeftLogical(Vector256<uint> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi64 (__m256i a, int imm8); VPSLLQ ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi64 (__m256i a, int imm8);
+ /// VPSLLQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftLeftLogical(Vector256<long> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_slli_epi64 (__m256i a, int imm8); VPSLLQ ymm, ymm, imm8
+ /// __m256i _mm256_slli_epi64 (__m256i a, int imm8);
+ /// VPSLLQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftLeftLogical(Vector256<ulong> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<sbyte> ShiftLeftLogical128BitLane(Vector256<sbyte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<byte> ShiftLeftLogical128BitLane(Vector256<byte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftLeftLogical128BitLane(Vector256<short> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftLeftLogical128BitLane(Vector256<ushort> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftLeftLogical128BitLane(Vector256<int> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftLeftLogical128BitLane(Vector256<uint> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftLeftLogical128BitLane(Vector256<long> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8); VPSLLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bslli_epi128 (__m256i a, const int imm8);
+ /// VPSLLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftLeftLogical128BitLane(Vector256<ulong> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count); VPSLLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count);
+ /// VPSLLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftLeftLogicalVariable(Vector256<int> value, Vector256<uint> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count); VPSLLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi32 (__m256i a, __m256i count);
+ /// VPSLLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> ShiftLeftLogicalVariable(Vector256<uint> value, Vector256<uint> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count); VPSLLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count);
+ /// VPSLLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> ShiftLeftLogicalVariable(Vector256<long> value, Vector256<ulong> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count); VPSLLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sllv_epi64 (__m256i a, __m256i count);
+ /// VPSLLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> ShiftLeftLogicalVariable(Vector256<ulong> value, Vector256<ulong> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count); VPSLLVD xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count);
+ /// VPSLLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftLeftLogicalVariable(Vector128<int> value, Vector128<uint> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count); VPSLLVD xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi32 (__m128i a, __m128i count);
+ /// VPSLLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftLeftLogicalVariable(Vector128<uint> value, Vector128<uint> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count); VPSLLVQ xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count);
+ /// VPSLLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftLeftLogicalVariable(Vector128<long> value, Vector128<ulong> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count); VPSLLVQ xmm, ymm, xmm/m128
+ /// __m128i _mm_sllv_epi64 (__m128i a, __m128i count);
+ /// VPSLLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftLeftLogicalVariable(Vector128<ulong> value, Vector128<ulong> count) => ShiftLeftLogicalVariable(value, count);
/// <summary>
- /// _mm256_sra_epi16 (__m256i a, __m128i count); VPSRAW ymm, ymm, xmm/m128
+ /// __m256i _mm256_sra_epi16 (__m256i a, __m128i count);
+ /// VPSRAW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftRightArithmetic(Vector256<short> value, Vector128<short> count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// _mm256_sra_epi32 (__m256i a, __m128i count); VPSRAD ymm, ymm, xmm/m128
+ /// __m256i _mm256_sra_epi32 (__m256i a, __m128i count);
+ /// VPSRAD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftRightArithmetic(Vector256<int> value, Vector128<int> count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m256i _mm256_srai_epi16 (__m256i a, int imm8); VPSRAW ymm, ymm, imm8
+ /// __m256i _mm256_srai_epi16 (__m256i a, int imm8);
+ /// VPSRAW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightArithmetic(Vector256<short> value, byte count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m256i _mm256_srai_epi32 (__m256i a, int imm8); VPSRAD ymm, ymm, imm8
+ /// __m256i _mm256_srai_epi32 (__m256i a, int imm8);
+ /// VPSRAD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightArithmetic(Vector256<int> value, byte count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m256i _mm256_srav_epi32 (__m256i a, __m256i count); VPSRAVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srav_epi32 (__m256i a, __m256i count);
+ /// VPSRAVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftRightArithmeticVariable(Vector256<int> value, Vector256<uint> count) => ShiftRightArithmeticVariable(value, count);
/// <summary>
- /// __m128i _mm_srav_epi32 (__m128i a, __m128i count); VPSRAVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srav_epi32 (__m128i a, __m128i count);
+ /// VPSRAVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightArithmeticVariable(Vector128<int> value, Vector128<uint> count) => ShiftRightArithmeticVariable(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count); VPSRLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count);
+ /// VPSRLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<short> ShiftRightLogical(Vector256<short> value, Vector128<short> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count); VPSRLW ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi16 (__m256i a, __m128i count);
+ /// VPSRLW ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ushort> ShiftRightLogical(Vector256<ushort> value, Vector128<ushort> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count); VPSRLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count);
+ /// VPSRLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<int> ShiftRightLogical(Vector256<int> value, Vector128<int> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count); VPSRLD ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi32 (__m256i a, __m128i count);
+ /// VPSRLD ymm, ymm, xmm/m128
/// </summary>
public static Vector256<uint> ShiftRightLogical(Vector256<uint> value, Vector128<uint> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count); VPSRLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count);
+ /// VPSRLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<long> ShiftRightLogical(Vector256<long> value, Vector128<long> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count); VPSRLQ ymm, ymm, xmm/m128
+ /// __m256i _mm256_srl_epi64 (__m256i a, __m128i count);
+ /// VPSRLQ ymm, ymm, xmm/m128
/// </summary>
public static Vector256<ulong> ShiftRightLogical(Vector256<ulong> value, Vector128<ulong> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi16 (__m256i a, int imm8); VPSRLW ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi16 (__m256i a, int imm8);
+ /// VPSRLW ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightLogical(Vector256<short> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi16 (__m256i a, int imm8); VPSRLW ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi16 (__m256i a, int imm8);
+ /// VPSRLW ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftRightLogical(Vector256<ushort> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi32 (__m256i a, int imm8); VPSRLD ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi32 (__m256i a, int imm8);
+ /// VPSRLD ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightLogical(Vector256<int> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi32 (__m256i a, int imm8); VPSRLD ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi32 (__m256i a, int imm8);
+ /// VPSRLD ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftRightLogical(Vector256<uint> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi64 (__m256i a, int imm8); VPSRLQ ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi64 (__m256i a, int imm8);
+ /// VPSRLQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftRightLogical(Vector256<long> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_srli_epi64 (__m256i a, int imm8); VPSRLQ ymm, ymm, imm8
+ /// __m256i _mm256_srli_epi64 (__m256i a, int imm8);
+ /// VPSRLQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftRightLogical(Vector256<ulong> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<sbyte> ShiftRightLogical128BitLane(Vector256<sbyte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<byte> ShiftRightLogical128BitLane(Vector256<byte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<short> ShiftRightLogical128BitLane(Vector256<short> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ushort> ShiftRightLogical128BitLane(Vector256<ushort> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<int> ShiftRightLogical128BitLane(Vector256<int> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<uint> ShiftRightLogical128BitLane(Vector256<uint> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<long> ShiftRightLogical128BitLane(Vector256<long> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8); VPSRLDQ ymm, ymm, imm8
+ /// __m256i _mm256_bsrli_epi128 (__m256i a, const int imm8);
+ /// VPSRLDQ ymm, ymm, imm8
/// </summary>
public static Vector256<ulong> ShiftRightLogical128BitLane(Vector256<ulong> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count); VPSRLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count);
+ /// VPSRLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> ShiftRightLogicalVariable(Vector256<int> value, Vector256<uint> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count); VPSRLVD ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi32 (__m256i a, __m256i count);
+ /// VPSRLVD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> ShiftRightLogicalVariable(Vector256<uint> value, Vector256<uint> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count); VPSRLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count);
+ /// VPSRLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> ShiftRightLogicalVariable(Vector256<long> value, Vector256<ulong> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count); VPSRLVQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_srlv_epi64 (__m256i a, __m256i count);
+ /// VPSRLVQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> ShiftRightLogicalVariable(Vector256<ulong> value, Vector256<ulong> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count); VPSRLVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count);
+ /// VPSRLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightLogicalVariable(Vector128<int> value, Vector128<uint> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count); VPSRLVD xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi32 (__m128i a, __m128i count);
+ /// VPSRLVD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftRightLogicalVariable(Vector128<uint> value, Vector128<uint> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count); VPSRLVQ xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count);
+ /// VPSRLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftRightLogicalVariable(Vector128<long> value, Vector128<ulong> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count); VPSRLVQ xmm, xmm, xmm/m128
+ /// __m128i _mm_srlv_epi64 (__m128i a, __m128i count);
+ /// VPSRLVQ xmm, xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftRightLogicalVariable(Vector128<ulong> value, Vector128<ulong> count) => ShiftRightLogicalVariable(value, count);
/// <summary>
- /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b); VPSHUFB ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b);
+ /// VPSHUFB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Shuffle(Vector256<sbyte> value, Vector256<sbyte> mask) => Shuffle(value, mask);
/// <summary>
- /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b); VPSHUFB ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi8 (__m256i a, __m256i b);
+ /// VPSHUFB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Shuffle(Vector256<byte> value, Vector256<byte> mask) => Shuffle(value, mask);
/// <summary>
- /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8); VPSHUFD ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8);
+ /// VPSHUFD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<int> Shuffle(Vector256<int> value, byte control) => Shuffle(value, control);
/// <summary>
- /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8); VPSHUFD ymm, ymm, ymm/m256
+ /// __m256i _mm256_shuffle_epi32 (__m256i a, const int imm8);
+ /// VPSHUFD ymm, ymm/m256, imm8
/// </summary>
public static Vector256<uint> Shuffle(Vector256<uint> value, byte control) => Shuffle(value, control);
/// <summary>
- /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8); VPSHUFHW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8);
+ /// VPSHUFHW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> ShuffleHigh(Vector256<short> value, byte control) => ShuffleHigh(value, control);
/// <summary>
- /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8); VPSHUFHW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflehi_epi16 (__m256i a, const int imm8);
+ /// VPSHUFHW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> ShuffleHigh(Vector256<ushort> value, byte control) => ShuffleHigh(value, control);
/// <summary>
- /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8); VPSHUFLW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8);
+ /// VPSHUFLW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<short> ShuffleLow(Vector256<short> value, byte control) => ShuffleLow(value, control);
/// <summary>
- /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8); VPSHUFLW ymm, ymm/m256, imm8
+ /// __m256i _mm256_shufflelo_epi16 (__m256i a, const int imm8);
+ /// VPSHUFLW ymm, ymm/m256, imm8
/// </summary>
public static Vector256<ushort> ShuffleLow(Vector256<ushort> value, byte control) => ShuffleLow(value, control);
/// <summary>
- /// __m256i _mm256_sign_epi8 (__m256i a, __m256i b); VPSIGNB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi8 (__m256i a, __m256i b);
+ /// VPSIGNB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Sign(Vector256<sbyte> left, Vector256<sbyte> right) => Sign(left, right);
/// <summary>
- /// __m256i _mm256_sign_epi16 (__m256i a, __m256i b); VPSIGNW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi16 (__m256i a, __m256i b);
+ /// VPSIGNW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Sign(Vector256<short> left, Vector256<short> right) => Sign(left, right);
/// <summary>
- /// __m256i _mm256_sign_epi32 (__m256i a, __m256i b); VPSIGND ymm, ymm, ymm/m256
+ /// __m256i _mm256_sign_epi32 (__m256i a, __m256i b);
+ /// VPSIGND ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Sign(Vector256<int> left, Vector256<int> right) => Sign(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b); VPSUBB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b);
+ /// VPSUBB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Subtract(Vector256<sbyte> left, Vector256<sbyte> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b); VPSUBB ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi8 (__m256i a, __m256i b);
+ /// VPSUBB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Subtract(Vector256<byte> left, Vector256<byte> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b); VPSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b);
+ /// VPSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Subtract(Vector256<short> left, Vector256<short> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b); VPSUBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi16 (__m256i a, __m256i b);
+ /// VPSUBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Subtract(Vector256<ushort> left, Vector256<ushort> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b); VPSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b);
+ /// VPSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Subtract(Vector256<int> left, Vector256<int> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b); VPSUBD ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi32 (__m256i a, __m256i b);
+ /// VPSUBD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Subtract(Vector256<uint> left, Vector256<uint> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b); VPSUBQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b);
+ /// VPSUBQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Subtract(Vector256<long> left, Vector256<long> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b); VPSUBQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_sub_epi64 (__m256i a, __m256i b);
+ /// VPSUBQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Subtract(Vector256<ulong> left, Vector256<ulong> right) => Subtract(left, right);
/// <summary>
- /// __m256i _mm256_subs_epi8 (__m256i a, __m256i b); VPSUBSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epi8 (__m256i a, __m256i b);
+ /// VPSUBSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> SubtractSaturate(Vector256<sbyte> left, Vector256<sbyte> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m256i _mm256_subs_epi16 (__m256i a, __m256i b); VPSUBSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epi16 (__m256i a, __m256i b);
+ /// VPSUBSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> SubtractSaturate(Vector256<short> left, Vector256<short> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m256i _mm256_subs_epu8 (__m256i a, __m256i b); VPSUBUSB ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epu8 (__m256i a, __m256i b);
+ /// VPSUBUSB ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> SubtractSaturate(Vector256<byte> left, Vector256<byte> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m256i _mm256_subs_epu16 (__m256i a, __m256i b); VPSUBUSW ymm, ymm, ymm/m256
+ /// __m256i _mm256_subs_epu16 (__m256i a, __m256i b);
+ /// VPSUBUSW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> SubtractSaturate(Vector256<ushort> left, Vector256<ushort> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m256i _mm256_sad_epu8 (__m256i a, __m256i b); VPSADBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_sad_epu8 (__m256i a, __m256i b);
+ /// VPSADBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> SumAbsoluteDifferences(Vector256<byte> left, Vector256<byte> right) => SumAbsoluteDifferences(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b); VPUNPCKHBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKHBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> UnpackHigh(Vector256<sbyte> left, Vector256<sbyte> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b); VPUNPCKHBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKHBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> UnpackHigh(Vector256<byte> left, Vector256<byte> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b); VPUNPCKHWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKHWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> UnpackHigh(Vector256<short> left, Vector256<short> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b); VPUNPCKHWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKHWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> UnpackHigh(Vector256<ushort> left, Vector256<ushort> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b); VPUNPCKHDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKHDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> UnpackHigh(Vector256<int> left, Vector256<int> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b); VPUNPCKHDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKHDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> UnpackHigh(Vector256<uint> left, Vector256<uint> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b); VPUNPCKHQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKHQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> UnpackHigh(Vector256<long> left, Vector256<long> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b); VPUNPCKHQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpackhi_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKHQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> UnpackHigh(Vector256<ulong> left, Vector256<ulong> right) => UnpackHigh(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b); VPUNPCKLBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKLBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> UnpackLow(Vector256<sbyte> left, Vector256<sbyte> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b); VPUNPCKLBW ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi8 (__m256i a, __m256i b);
+ /// VPUNPCKLBW ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> UnpackLow(Vector256<byte> left, Vector256<byte> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b); VPUNPCKLWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKLWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> UnpackLow(Vector256<short> left, Vector256<short> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b); VPUNPCKLWD ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi16 (__m256i a, __m256i b);
+ /// VPUNPCKLWD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> UnpackLow(Vector256<ushort> left, Vector256<ushort> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b); VPUNPCKLDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKLDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> UnpackLow(Vector256<int> left, Vector256<int> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b); VPUNPCKLDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi32 (__m256i a, __m256i b);
+ /// VPUNPCKLDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> UnpackLow(Vector256<uint> left, Vector256<uint> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b); VPUNPCKLQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKLQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> UnpackLow(Vector256<long> left, Vector256<long> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b); VPUNPCKLQDQ ymm, ymm, ymm/m256
+ /// __m256i _mm256_unpacklo_epi64 (__m256i a, __m256i b);
+ /// VPUNPCKLQDQ ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> UnpackLow(Vector256<ulong> left, Vector256<ulong> right) => UnpackLow(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<sbyte> Xor(Vector256<sbyte> left, Vector256<sbyte> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<byte> Xor(Vector256<byte> left, Vector256<byte> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<short> Xor(Vector256<short> left, Vector256<short> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ushort> Xor(Vector256<ushort> left, Vector256<ushort> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<int> Xor(Vector256<int> left, Vector256<int> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<uint> Xor(Vector256<uint> left, Vector256<uint> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<long> Xor(Vector256<long> left, Vector256<long> right) => Xor(left, right);
/// <summary>
- /// __m256i _mm256_xor_si256 (__m256i a, __m256i b); VPXOR ymm, ymm, ymm/m256
+ /// __m256i _mm256_xor_si256 (__m256i a, __m256i b);
+ /// VPXOR ymm, ymm, ymm/m256
/// </summary>
public static Vector256<ulong> Xor(Vector256<ulong> left, Vector256<ulong> right) => Xor(left, right);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Bmi2.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Bmi2.cs
index c6dfa72f44..4b42356fd0 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Bmi2.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Bmi2.cs
@@ -49,6 +49,6 @@ namespace System.Runtime.Intrinsics.X86
/// <summary>
/// unsigned __int64 _pext_u64 (unsigned __int64 a, unsigned __int64 mask)
/// </summary>
- public static ulong ParallelBitExtract(ulong value, ulong mask) => ParallelBitExtract(value, mask);
+ public static ulong ParallelBitExtract(ulong value, ulong mask) => ParallelBitExtract(value, mask);
}
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs
index b562a26440..a475caedf5 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs
@@ -15,140 +15,172 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c); VFMADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c); VFMADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c); VFMADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c); VFMADDPS ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c); VFMADDSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c);
+ /// VFMADDSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyAddScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c); VFMADDSS xmm, xmm, xmm/m64
+ /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c);
+ /// VFMADDSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyAddScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c); VFMADDSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMADDSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAddSubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c); VFMADDSUBPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMADDSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAddSubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c); VFMADDSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMADDSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAddSubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c); VFMADDSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMADDSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAddSubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c); VFMSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c); VFMSUBPS xmm, xmm, xmm/m128
+ /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c); VFMSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c); VFMSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c); VFMSUBSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplySubtractScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c); VFMSUBSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c);
+ /// VFMSUBSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplySubtractScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c); VFMSUBADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtractAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c); VFMSUBADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMSUBADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtractAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c); VFMSUBADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMSUBADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtractAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c); VFMSUBADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMSUBADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtractAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c); VFNMADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFNMADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAddNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c); VFNMADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFNMADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAddNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c); VFNMADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFNMADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAddNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c); VFNMADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c);
+ /// VFNMADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAddNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c); VFNMADDSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c);
+ /// VFNMADDSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyAddNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c); VFNMADDSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c);
+ /// VFNMADDSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyAddNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c); VFNMSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFNMSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtractNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c); VFNMSUBPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c);
+ /// VFNMSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtractNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c); VFNMSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFNMSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtractNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c); VFNMSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFNMSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtractNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c); VFNMSUBSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c);
+ /// VFNMSUBSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplySubtractNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c); VFNMSUBSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c);
+ /// VFNMSUBSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplySubtractNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs
index 12f377219a..1bbe7b8739 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs
@@ -15,140 +15,172 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c); VFMADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplyAdd(a, b, c);
/// <summary>
- /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c); VFMADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplyAdd(a, b, c);
/// <summary>
- /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c); VFMADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplyAdd(a, b, c);
/// <summary>
- /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c); VFMADDPS ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c);
 + /// VFMADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplyAdd(a, b, c);
/// <summary>
- /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c); VFMADDSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c);
+ /// VFMADDSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyAddScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplyAddScalar(a, b, c);
/// <summary>
- /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c); VFMADDSS xmm, xmm, xmm/m64
+ /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c);
 + /// VFMADDSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyAddScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplyAddScalar(a, b, c);
/// <summary>
- /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c); VFMADDSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMADDSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAddSubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplyAddSubtract(a, b, c);
/// <summary>
- /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c); VFMADDSUBPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMADDSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAddSubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplyAddSubtract(a, b, c);
/// <summary>
- /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c); VFMADDSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMADDSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAddSubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplyAddSubtract(a, b, c);
/// <summary>
- /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c); VFMADDSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMADDSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAddSubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplyAddSubtract(a, b, c);
/// <summary>
- /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c); VFMSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplySubtract(a, b, c);
/// <summary>
- /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c); VFMSUBPS xmm, xmm, xmm/m128
+ /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c);
 + /// VFMSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplySubtract(a, b, c);
/// <summary>
- /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c); VFMSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplySubtract(a, b, c);
/// <summary>
- /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c); VFMSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplySubtract(a, b, c);
/// <summary>
- /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c); VFMSUBSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplySubtractScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplySubtractScalar(a, b, c);
/// <summary>
- /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c); VFMSUBSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c);
+ /// VFMSUBSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplySubtractScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplySubtractScalar(a, b, c);
/// <summary>
- /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c); VFMSUBADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFMSUBADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtractAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplySubtractAdd(a, b, c);
/// <summary>
- /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c); VFMSUBADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFMSUBADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtractAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplySubtractAdd(a, b, c);
/// <summary>
- /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c); VFMSUBADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFMSUBADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtractAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplySubtractAdd(a, b, c);
/// <summary>
- /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c); VFMSUBADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c);
+ /// VFMSUBADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtractAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplySubtractAdd(a, b, c);
/// <summary>
- /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c); VFNMADDPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c);
+ /// VFNMADDPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplyAddNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplyAddNegated(a, b, c);
/// <summary>
- /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c); VFNMADDPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c);
+ /// VFNMADDPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplyAddNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplyAddNegated(a, b, c);
/// <summary>
- /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c); VFNMADDPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c);
+ /// VFNMADDPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplyAddNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplyAddNegated(a, b, c);
/// <summary>
- /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c); VFNMADDPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c);
+ /// VFNMADDPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplyAddNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplyAddNegated(a, b, c);
/// <summary>
- /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c); VFNMADDSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c);
+ /// VFNMADDSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyAddNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplyAddNegatedScalar(a, b, c);
/// <summary>
- /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c); VFNMADDSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c);
+ /// VFNMADDSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyAddNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplyAddNegatedScalar(a, b, c);
/// <summary>
- /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c); VFNMSUBPS xmm, xmm, xmm/m128
+ /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c);
+ /// VFNMSUBPS xmm, xmm, xmm/m128
/// </summary>
public static Vector128<float> MultiplySubtractNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplySubtractNegated(a, b, c);
/// <summary>
- /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c); VFNMSUBPD xmm, xmm, xmm/m128
+ /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c);
+ /// VFNMSUBPD xmm, xmm, xmm/m128
/// </summary>
public static Vector128<double> MultiplySubtractNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplySubtractNegated(a, b, c);
/// <summary>
- /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c); VFNMSUBPS ymm, ymm, ymm/m256
+ /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c);
+ /// VFNMSUBPS ymm, ymm, ymm/m256
/// </summary>
public static Vector256<float> MultiplySubtractNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) => MultiplySubtractNegated(a, b, c);
/// <summary>
- /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c); VFNMSUBPD ymm, ymm, ymm/m256
+ /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c);
+ /// VFNMSUBPD ymm, ymm, ymm/m256
/// </summary>
public static Vector256<double> MultiplySubtractNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) => MultiplySubtractNegated(a, b, c);
/// <summary>
- /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c); VFNMSUBSS xmm, xmm, xmm/m32
+ /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c);
+ /// VFNMSUBSS xmm, xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplySubtractNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) => MultiplySubtractNegatedScalar(a, b, c);
/// <summary>
- /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c); VFNMSUBSD xmm, xmm, xmm/m64
+ /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c);
+ /// VFNMSUBSD xmm, xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplySubtractNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) => MultiplySubtractNegatedScalar(a, b, c);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.PlatformNotSupported.cs
index ddf7c916b3..fedd7dbbf2 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.PlatformNotSupported.cs
@@ -16,355 +16,426 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128 _mm_add_ps (__m128 a, __m128 b); ADDPS xmm, xmm/m128
+ /// __m128 _mm_add_ps (__m128 a, __m128 b);
+ /// ADDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Add(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_add_ss (__m128 a, __m128 b); ADDSS xmm, xmm/m32
+ /// __m128 _mm_add_ss (__m128 a, __m128 b);
+ /// ADDSS xmm, xmm/m32
/// </summary>
public static Vector128<float> AddScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_and_ps (__m128 a, __m128 b); ANDPS xmm, xmm/m128
+ /// __m128 _mm_and_ps (__m128 a, __m128 b);
+ /// ANDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> And(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_andnot_ps (__m128 a, __m128 b); ANDNPS xmm, xmm/m128
+ /// __m128 _mm_andnot_ps (__m128 a, __m128 b);
+ /// ANDNPS xmm, xmm/m128
/// </summary>
public static Vector128<float> AndNot(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpeq_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(0)
+ /// __m128 _mm_cmpeq_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(0)
/// </summary>
public static Vector128<float> CompareEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comieq_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comieq_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareEqualOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomieq_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomieq_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpeq_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(0)
+ /// __m128 _mm_cmpeq_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(0)
/// </summary>
public static Vector128<float> CompareEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpgt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(6)
+ /// __m128 _mm_cmpgt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<float> CompareGreaterThan(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comigt_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comigt_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomigt_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomigt_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpgt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(6)
+ /// __m128 _mm_cmpgt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(6)
/// </summary>
public static Vector128<float> CompareGreaterThanScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpge_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(5)
+ /// __m128 _mm_cmpge_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<float> CompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comige_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comige_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrEqualOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomige_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomige_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpge_ss (__m128 a, __m128 b); CMPPS xmm, xmm/m32, imm8(5)
+ /// __m128 _mm_cmpge_ss (__m128 a, __m128 b);
 + /// CMPSS xmm, xmm/m32, imm8(5)
/// </summary>
public static Vector128<float> CompareGreaterThanOrEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmplt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(1)
+ /// __m128 _mm_cmplt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<float> CompareLessThan(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comilt_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comilt_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomilt_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomilt_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmplt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(1)
+ /// __m128 _mm_cmplt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(1)
/// </summary>
public static Vector128<float> CompareLessThanScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmple_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(2)
+ /// __m128 _mm_cmple_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<float> CompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comile_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comile_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrEqualOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomile_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomile_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmple_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(2)
+ /// __m128 _mm_cmple_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(2)
/// </summary>
public static Vector128<float> CompareLessThanOrEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpneq_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_cmpneq_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<float> CompareNotEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comineq_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comineq_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareNotEqualOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomineq_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomineq_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareNotEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpneq_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(4)
+ /// __m128 _mm_cmpneq_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(4)
/// </summary>
public static Vector128<float> CompareNotEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpngt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(2)
+ /// __m128 _mm_cmpngt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<float> CompareNotGreaterThan(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpngt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(2)
+ /// __m128 _mm_cmpngt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(2)
/// </summary>
public static Vector128<float> CompareNotGreaterThanScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnge_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(1)
+ /// __m128 _mm_cmpnge_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<float> CompareNotGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnge_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(1)
+ /// __m128 _mm_cmpnge_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(1)
/// </summary>
public static Vector128<float> CompareNotGreaterThanOrEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnlt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(5)
+ /// __m128 _mm_cmpnlt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<float> CompareNotLessThan(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnlt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(5)
+ /// __m128 _mm_cmpnlt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(5)
/// </summary>
public static Vector128<float> CompareNotLessThanScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnle_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(6)
+ /// __m128 _mm_cmpnle_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<float> CompareNotLessThanOrEqual(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpnle_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(6)
+ /// __m128 _mm_cmpnle_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(6)
/// </summary>
public static Vector128<float> CompareNotLessThanOrEqualScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpord_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(7)
+ /// __m128 _mm_cmpord_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(7)
/// </summary>
public static Vector128<float> CompareOrdered(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpord_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(7)
+ /// __m128 _mm_cmpord_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(7)
/// </summary>
public static Vector128<float> CompareOrderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpunord_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(3)
+ /// __m128 _mm_cmpunord_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(3)
/// </summary>
public static Vector128<float> CompareUnordered(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cmpunord_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(3)
+ /// __m128 _mm_cmpunord_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(3)
/// </summary>
public static Vector128<float> CompareUnorderedScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvtss_si32 (__m128 a); CVTSS2SI r32, xmm/m32
+ /// int _mm_cvtss_si32 (__m128 a);
+ /// CVTSS2SI r32, xmm/m32
/// </summary>
public static int ConvertToInt32(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvtss_si64 (__m128 a); CVTSS2SI r64, xmm/m32
+ /// __int64 _mm_cvtss_si64 (__m128 a);
+ /// CVTSS2SI r64, xmm/m32
/// </summary>
public static long ConvertToInt64(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// float _mm_cvtss_f32 (__m128 a); HELPER: MOVSS
+ /// float _mm_cvtss_f32 (__m128 a);
+ /// HELPER: MOVSS
/// </summary>
public static float ConvertToSingle(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cvtsi32_ss (__m128 a, int b); CVTSI2SS xmm, reg/m32
+ /// __m128 _mm_cvtsi32_ss (__m128 a, int b);
+ /// CVTSI2SS xmm, reg/m32
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, int value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cvtsi64_ss (__m128 a, __int64 b); CVTSI2SS xmm, reg/m64
+ /// __m128 _mm_cvtsi64_ss (__m128 a, __int64 b);
+ /// CVTSI2SS xmm, reg/m64
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, long value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvttss_si32 (__m128 a); CVTTSS2SI r32, xmm/m32
+ /// int _mm_cvttss_si32 (__m128 a);
+ /// CVTTSS2SI r32, xmm/m32
/// </summary>
public static int ConvertToInt32WithTruncation(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvttss_si64 (__m128 a); CVTTSS2SI r64, xmm/m32
+ /// __int64 _mm_cvttss_si64 (__m128 a);
+ /// CVTTSS2SI r64, xmm/m32
/// </summary>
public static long ConvertToInt64WithTruncation(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_div_ps (__m128 a, __m128 b); DIVPS xmm, xmm/m128
+ /// __m128 _mm_div_ps (__m128 a, __m128 b);
+ /// DIVPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Divide(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_div_ss (__m128 a, __m128 b); DIVSS xmm, xmm/m32
+ /// __m128 _mm_div_ss (__m128 a, __m128 b);
+ /// DIVSS xmm, xmm/m32
/// </summary>
public static Vector128<float> DivideScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_loadu_ps (float const* mem_address); MOVUPS xmm, m128
+ /// __m128 _mm_loadu_ps (float const* mem_address);
+ /// MOVUPS xmm, m128
/// </summary>
public static unsafe Vector128<float> LoadVector128(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_load_ss (float const* mem_address); MOVSS xmm, m32
+ /// __m128 _mm_load_ss (float const* mem_address);
+ /// MOVSS xmm, m32
/// </summary>
public static unsafe Vector128<float> LoadScalarVector128(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_load_ps (float const* mem_address); MOVAPS xmm, m128
+ /// __m128 _mm_load_ps (float const* mem_address);
+ /// MOVAPS xmm, m128
/// </summary>
public static unsafe Vector128<float> LoadAlignedVector128(float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_loadh_pi (__m128 a, __m64 const* mem_addr); MOVHPS xmm, m64
+ /// __m128 _mm_loadh_pi (__m128 a, __m64 const* mem_addr);
+ /// MOVHPS xmm, m64
/// </summary>
public static unsafe Vector128<float> LoadHigh(Vector128<float> lower, float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_loadl_pi (__m128 a, __m64 const* mem_addr); MOVLPS xmm, m64
+ /// __m128 _mm_loadl_pi (__m128 a, __m64 const* mem_addr);
+ /// MOVLPS xmm, m64
/// </summary>
public static unsafe Vector128<float> LoadLow(Vector128<float> upper, float* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_max_ps (__m128 a, __m128 b); MAXPS xmm, xmm/m128
+ /// __m128 _mm_max_ps (__m128 a, __m128 b);
+ /// MAXPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Max(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_max_ss (__m128 a, __m128 b); MAXSS xmm, xmm/m32
+ /// __m128 _mm_max_ss (__m128 a, __m128 b);
+ /// MAXSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MaxScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_min_ps (__m128 a, __m128 b); MINPS xmm, xmm/m128
+ /// __m128 _mm_min_ps (__m128 a, __m128 b);
+ /// MINPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Min(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_min_ss (__m128 a, __m128 b); MINSS xmm, xmm/m32
+ /// __m128 _mm_min_ss (__m128 a, __m128 b);
+ /// MINSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MinScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_move_ss (__m128 a, __m128 b); MOVSS xmm, xmm
+ /// __m128 _mm_move_ss (__m128 a, __m128 b);
+ /// MOVSS xmm, xmm
/// </summary>
public static Vector128<float> MoveScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_movehl_ps (__m128 a, __m128 b); MOVHLPS xmm, xmm
+ /// __m128 _mm_movehl_ps (__m128 a, __m128 b);
+ /// MOVHLPS xmm, xmm
/// </summary>
public static Vector128<float> MoveHighToLow(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_movelh_ps (__m128 a, __m128 b); MOVLHPS xmm, xmm
+ /// __m128 _mm_movelh_ps (__m128 a, __m128 b);
+ /// MOVLHPS xmm, xmm
/// </summary>
public static Vector128<float> MoveLowToHigh(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_movemask_ps (__m128 a); MOVMSKPS reg, xmm
+ /// int _mm_movemask_ps (__m128 a);
+ /// MOVMSKPS reg, xmm
/// </summary>
public static int MoveMask(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_mul_ps (__m128 a, __m128 b); MULPS xmm, xmm/m128
+ /// __m128 _mm_mul_ps (__m128 a, __m128 b);
+ /// MULPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Multiply(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_mul_ss (__m128 a, __m128 b); MULPS xmm, xmm/m32
+ /// __m128 _mm_mul_ss (__m128 a, __m128 b);
+ /// MULSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_or_ps (__m128 a, __m128 b); ORPS xmm, xmm/m128
+ /// __m128 _mm_or_ps (__m128 a, __m128 b);
+ /// ORPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Or(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rcp_ps (__m128 a); RCPPS xmm, xmm/m128
+ /// __m128 _mm_rcp_ps (__m128 a);
+ /// RCPPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Reciprocal(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rcp_ss (__m128 a); RCPSS xmm, xmm/m32
+ /// __m128 _mm_rcp_ss (__m128 a);
+ /// RCPSS xmm, xmm/m32
/// </summary>
public static Vector128<float> ReciprocalScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rcp_ss (__m128 a, __m128 b); RCPSS xmm, xmm/m32
+ /// __m128 _mm_rcp_ss (__m128 a, __m128 b);
+ /// RCPSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> ReciprocalScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rsqrt_ps (__m128 a); RSQRTPS xmm, xmm/m128
+ /// __m128 _mm_rsqrt_ps (__m128 a);
+ /// RSQRTPS xmm, xmm/m128
/// </summary>
public static Vector128<float> ReciprocalSqrt(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rsqrt_ss (__m128 a); RSQRTSS xmm, xmm/m32
+ /// __m128 _mm_rsqrt_ss (__m128 a);
+ /// RSQRTSS xmm, xmm/m32
/// </summary>
public static Vector128<float> ReciprocalSqrtScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_rsqrt_ss (__m128 a, __m128 b); RSQRTSS xmm, xmm/m32
+ /// __m128 _mm_rsqrt_ss (__m128 a, __m128 b);
+ /// RSQRTSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> ReciprocalSqrtScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
@@ -375,104 +446,128 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<float> SetVector128(float e3, float e2, float e1, float e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_set_ss (float a); HELPER
+ /// __m128 _mm_set_ss (float a);
+ /// HELPER
/// </summary>
public static Vector128<float> SetScalarVector128(float value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_set1_ps (float a); HELPER
+ /// __m128 _mm_set1_ps (float a);
+ /// HELPER
/// </summary>
public static Vector128<float> SetAllVector128(float value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_setzero_ps (void); HELPER - XORPS
+ /// __m128 _mm_setzero_ps (void);
+ /// HELPER - XORPS
/// </summary>
public static Vector128<float> SetZeroVector128() { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_castpd_ps (__m128d a); HELPER - No Codegen
- /// __m128i _mm_castpd_si128 (__m128d a); HELPER - No Codegen
- /// __m128d _mm_castps_pd (__m128 a); HELPER - No Codegen
- /// __m128i _mm_castps_si128 (__m128 a); HELPER - No Codegen
- /// __m128d _mm_castsi128_pd (__m128i a); HELPER - No Codegen
- /// __m128 _mm_castsi128_ps (__m128i a); HELPER - No Codegen
+ /// __m128 _mm_castpd_ps (__m128d a);
+ /// HELPER - No Codegen
+ /// __m128i _mm_castpd_si128 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m128d _mm_castps_pd (__m128 a);
+ /// HELPER - No Codegen
+ /// __m128i _mm_castps_si128 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m128d _mm_castsi128_pd (__m128i a);
+ /// HELPER - No Codegen
+ /// __m128 _mm_castsi128_ps (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector128<U> StaticCast<T, U>(Vector128<T> value) where T : struct where U : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_shuffle_ps (__m128 a, __m128 b, unsigned int control); SHUFPS xmm, xmm/m128, imm8
+ /// __m128 _mm_shuffle_ps (__m128 a, __m128 b, unsigned int control);
+ /// SHUFPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Shuffle(Vector128<float> left, Vector128<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_sqrt_ps (__m128 a); SQRTPS xmm, xmm/m128
+ /// __m128 _mm_sqrt_ps (__m128 a);
+ /// SQRTPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Sqrt(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_sqrt_ss (__m128 a); SQRTSS xmm, xmm/m32
+ /// __m128 _mm_sqrt_ss (__m128 a);
+ /// SQRTSS xmm, xmm/m32
/// </summary>
public static Vector128<float> SqrtScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_sqrt_ss (__m128 a, __m128 b); SQRTSS xmm, xmm/m32
+ /// __m128 _mm_sqrt_ss (__m128 a, __m128 b);
+ /// SQRTSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> SqrtScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_ps (float* mem_addr, __m128 a); MOVAPS m128, xmm
+ /// void _mm_store_ps (float* mem_addr, __m128 a);
+ /// MOVAPS m128, xmm
/// </summary>
public static unsafe void StoreAligned(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_ps (float* mem_addr, __m128 a); MOVNTPS m128, xmm
+ /// void _mm_stream_ps (float* mem_addr, __m128 a);
+ /// MOVNTPS m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_ps (float* mem_addr, __m128 a); MOVUPS m128, xmm
+ /// void _mm_storeu_ps (float* mem_addr, __m128 a);
+ /// MOVUPS m128, xmm
/// </summary>
public static unsafe void Store(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_ss (float* mem_addr, __m128 a); MOVSS m32, xmm
+ /// void _mm_store_ss (float* mem_addr, __m128 a);
+ /// MOVSS m32, xmm
/// </summary>
public static unsafe void StoreScalar(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeh_pi (__m64* mem_addr, __m128 a); MOVHPS m64, xmm
+ /// void _mm_storeh_pi (__m64* mem_addr, __m128 a);
+ /// MOVHPS m64, xmm
/// </summary>
public static unsafe void StoreHigh(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storel_pi (__m64* mem_addr, __m128 a); MOVLPS m64, xmm
+ /// void _mm_storel_pi (__m64* mem_addr, __m128 a);
+ /// MOVLPS m64, xmm
/// </summary>
public static unsafe void StoreLow(float* address, Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sub_ps (__m128d a, __m128d b); SUBPS xmm, xmm/m128
+ /// __m128 _mm_sub_ps (__m128 a, __m128 b);
+ /// SUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Subtract(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_sub_ss (__m128 a, __m128 b); SUBSS xmm, xmm/m32
+ /// __m128 _mm_sub_ss (__m128 a, __m128 b);
+ /// SUBSS xmm, xmm/m32
/// </summary>
public static Vector128<float> SubtractScalar(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_unpackhi_ps (__m128 a, __m128 b); UNPCKHPS xmm, xmm/m128
+ /// __m128 _mm_unpackhi_ps (__m128 a, __m128 b);
+ /// UNPCKHPS xmm, xmm/m128
/// </summary>
public static Vector128<float> UnpackHigh(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_unpacklo_ps (__m128 a, __m128 b); UNPCKLPS xmm, xmm/m128
+ /// __m128 _mm_unpacklo_ps (__m128 a, __m128 b);
+ /// UNPCKLPS xmm, xmm/m128
/// </summary>
public static Vector128<float> UnpackLow(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_xor_ps (__m128 a, __m128 b); XORPS xmm, xmm/m128
+ /// __m128 _mm_xor_ps (__m128 a, __m128 b);
+ /// XORPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Xor(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.cs
index 4084d09397..b6c893c9aa 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse.cs
@@ -16,355 +16,426 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128 _mm_add_ps (__m128 a, __m128 b); ADDPS xmm, xmm/m128
+ /// __m128 _mm_add_ps (__m128 a, __m128 b);
+ /// ADDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Add(Vector128<float> left, Vector128<float> right) => Add(left, right);
/// <summary>
- /// __m128 _mm_add_ss (__m128 a, __m128 b); ADDSS xmm, xmm/m32
+ /// __m128 _mm_add_ss (__m128 a, __m128 b);
+ /// ADDSS xmm, xmm/m32
/// </summary>
public static Vector128<float> AddScalar(Vector128<float> left, Vector128<float> right) => AddScalar(left, right);
/// <summary>
- /// __m128 _mm_and_ps (__m128 a, __m128 b); ANDPS xmm, xmm/m128
+ /// __m128 _mm_and_ps (__m128 a, __m128 b);
+ /// ANDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> And(Vector128<float> left, Vector128<float> right) => And(left, right);
/// <summary>
- /// __m128 _mm_andnot_ps (__m128 a, __m128 b); ANDNPS xmm, xmm/m128
+ /// __m128 _mm_andnot_ps (__m128 a, __m128 b);
+ /// ANDNPS xmm, xmm/m128
/// </summary>
public static Vector128<float> AndNot(Vector128<float> left, Vector128<float> right) => AndNot(left, right);
/// <summary>
- /// __m128 _mm_cmpeq_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(0)
+ /// __m128 _mm_cmpeq_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(0)
/// </summary>
public static Vector128<float> CompareEqual(Vector128<float> left, Vector128<float> right) => CompareEqual(left, right);
/// <summary>
- /// int _mm_comieq_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comieq_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareEqualOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomieq_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomieq_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpeq_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(0)
+ /// __m128 _mm_cmpeq_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(0)
/// </summary>
public static Vector128<float> CompareEqualScalar(Vector128<float> left, Vector128<float> right) => CompareEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpgt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(6)
+ /// __m128 _mm_cmpgt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<float> CompareGreaterThan(Vector128<float> left, Vector128<float> right) => CompareGreaterThan(left, right);
/// <summary>
- /// int _mm_comigt_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comigt_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomigt_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomigt_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpgt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(6)
+ /// __m128 _mm_cmpgt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(6)
/// </summary>
public static Vector128<float> CompareGreaterThanScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpge_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(5)
+ /// __m128 _mm_cmpge_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<float> CompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqual(left, right);
/// <summary>
- /// int _mm_comige_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comige_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrEqualOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomige_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomige_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareGreaterThanOrEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpge_ss (__m128 a, __m128 b); CMPPS xmm, xmm/m32, imm8(5)
+ /// __m128 _mm_cmpge_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(5)
/// </summary>
public static Vector128<float> CompareGreaterThanOrEqualScalar(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmplt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(1)
+ /// __m128 _mm_cmplt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<float> CompareLessThan(Vector128<float> left, Vector128<float> right) => CompareLessThan(left, right);
/// <summary>
- /// int _mm_comilt_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comilt_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomilt_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomilt_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmplt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(1)
+ /// __m128 _mm_cmplt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(1)
/// </summary>
public static Vector128<float> CompareLessThanScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanScalar(left, right);
/// <summary>
- /// __m128 _mm_cmple_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(2)
+ /// __m128 _mm_cmple_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<float> CompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqual(left, right);
/// <summary>
- /// int _mm_comile_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comile_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrEqualOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomile_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomile_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareLessThanOrEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmple_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(2)
+ /// __m128 _mm_cmple_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(2)
/// </summary>
public static Vector128<float> CompareLessThanOrEqualScalar(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpneq_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_cmpneq_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<float> CompareNotEqual(Vector128<float> left, Vector128<float> right) => CompareNotEqual(left, right);
/// <summary>
- /// int _mm_comineq_ss (__m128 a, __m128 b); COMISS xmm, xmm/m32
+ /// int _mm_comineq_ss (__m128 a, __m128 b);
+ /// COMISS xmm, xmm/m32
/// </summary>
public static bool CompareNotEqualOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareNotEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomineq_ss (__m128 a, __m128 b); UCOMISS xmm, xmm/m32
+ /// int _mm_ucomineq_ss (__m128 a, __m128 b);
+ /// UCOMISS xmm, xmm/m32
/// </summary>
public static bool CompareNotEqualUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareNotEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpneq_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(4)
+ /// __m128 _mm_cmpneq_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(4)
/// </summary>
public static Vector128<float> CompareNotEqualScalar(Vector128<float> left, Vector128<float> right) => CompareNotEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpngt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(2)
+ /// __m128 _mm_cmpngt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<float> CompareNotGreaterThan(Vector128<float> left, Vector128<float> right) => CompareNotGreaterThan(left, right);
/// <summary>
- /// __m128 _mm_cmpngt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(2)
+ /// __m128 _mm_cmpngt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(2)
/// </summary>
public static Vector128<float> CompareNotGreaterThanScalar(Vector128<float> left, Vector128<float> right) => CompareNotGreaterThanScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpnge_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(1)
+ /// __m128 _mm_cmpnge_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<float> CompareNotGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareNotGreaterThanOrEqual(left, right);
/// <summary>
- /// __m128 _mm_cmpnge_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(1)
+ /// __m128 _mm_cmpnge_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(1)
/// </summary>
public static Vector128<float> CompareNotGreaterThanOrEqualScalar(Vector128<float> left, Vector128<float> right) => CompareNotGreaterThanOrEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpnlt_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(5)
+ /// __m128 _mm_cmpnlt_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<float> CompareNotLessThan(Vector128<float> left, Vector128<float> right) => CompareNotLessThan(left, right);
/// <summary>
- /// __m128 _mm_cmpnlt_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(5)
+ /// __m128 _mm_cmpnlt_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(5)
/// </summary>
public static Vector128<float> CompareNotLessThanScalar(Vector128<float> left, Vector128<float> right) => CompareNotLessThanScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpnle_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(6)
+ /// __m128 _mm_cmpnle_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<float> CompareNotLessThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareNotLessThanOrEqual(left, right);
/// <summary>
- /// __m128 _mm_cmpnle_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(6)
+ /// __m128 _mm_cmpnle_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(6)
/// </summary>
public static Vector128<float> CompareNotLessThanOrEqualScalar(Vector128<float> left, Vector128<float> right) => CompareNotLessThanOrEqualScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpord_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(7)
+ /// __m128 _mm_cmpord_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(7)
/// </summary>
public static Vector128<float> CompareOrdered(Vector128<float> left, Vector128<float> right) => CompareOrdered(left, right);
/// <summary>
- /// __m128 _mm_cmpord_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(7)
+ /// __m128 _mm_cmpord_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(7)
/// </summary>
public static Vector128<float> CompareOrderedScalar(Vector128<float> left, Vector128<float> right) => CompareOrderedScalar(left, right);
/// <summary>
- /// __m128 _mm_cmpunord_ps (__m128 a, __m128 b); CMPPS xmm, xmm/m128, imm8(3)
+ /// __m128 _mm_cmpunord_ps (__m128 a, __m128 b);
+ /// CMPPS xmm, xmm/m128, imm8(3)
/// </summary>
public static Vector128<float> CompareUnordered(Vector128<float> left, Vector128<float> right) => CompareUnordered(left, right);
/// <summary>
- /// __m128 _mm_cmpunord_ss (__m128 a, __m128 b); CMPSS xmm, xmm/m32, imm8(3)
+ /// __m128 _mm_cmpunord_ss (__m128 a, __m128 b);
+ /// CMPSS xmm, xmm/m32, imm8(3)
/// </summary>
public static Vector128<float> CompareUnorderedScalar(Vector128<float> left, Vector128<float> right) => CompareUnorderedScalar(left, right);
/// <summary>
- /// int _mm_cvtss_si32 (__m128 a); CVTSS2SI r32, xmm/m32
+ /// int _mm_cvtss_si32 (__m128 a);
+ /// CVTSS2SI r32, xmm/m32
/// </summary>
public static int ConvertToInt32(Vector128<float> value) => ConvertToInt32(value);
/// <summary>
- /// __int64 _mm_cvtss_si64 (__m128 a); CVTSS2SI r64, xmm/m32
+ /// __int64 _mm_cvtss_si64 (__m128 a);
+ /// CVTSS2SI r64, xmm/m32
/// </summary>
public static long ConvertToInt64(Vector128<float> value) => ConvertToInt64(value);
/// <summary>
- /// float _mm_cvtss_f32 (__m128 a); HELPER: MOVSS
+ /// float _mm_cvtss_f32 (__m128 a);
+ /// HELPER: MOVSS
/// </summary>
public static float ConvertToSingle(Vector128<float> value) => ConvertToSingle(value);
/// <summary>
- /// __m128 _mm_cvtsi32_ss (__m128 a, int b); CVTSI2SS xmm, reg/m32
+ /// __m128 _mm_cvtsi32_ss (__m128 a, int b);
+ /// CVTSI2SS xmm, reg/m32
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, int value) => ConvertToVector128SingleScalar(upper, value);
/// <summary>
- /// __m128 _mm_cvtsi64_ss (__m128 a, __int64 b); CVTSI2SS xmm, reg/m64
+ /// __m128 _mm_cvtsi64_ss (__m128 a, __int64 b);
+ /// CVTSI2SS xmm, reg/m64
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, long value) => ConvertToVector128SingleScalar(upper, value);
/// <summary>
- /// int _mm_cvttss_si32 (__m128 a); CVTTSS2SI r32, xmm/m32
+ /// int _mm_cvttss_si32 (__m128 a);
+ /// CVTTSS2SI r32, xmm/m32
/// </summary>
public static int ConvertToInt32WithTruncation(Vector128<float> value) => ConvertToInt32WithTruncation(value);
/// <summary>
- /// __int64 _mm_cvttss_si64 (__m128 a); CVTTSS2SI r64, xmm/m32
+ /// __int64 _mm_cvttss_si64 (__m128 a);
+ /// CVTTSS2SI r64, xmm/m32
/// </summary>
public static long ConvertToInt64WithTruncation(Vector128<float> value) => ConvertToInt64WithTruncation(value);
/// <summary>
- /// __m128 _mm_div_ps (__m128 a, __m128 b); DIVPS xmm, xmm/m128
+ /// __m128 _mm_div_ps (__m128 a, __m128 b);
+ /// DIVPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Divide(Vector128<float> left, Vector128<float> right) => Divide(left, right);
/// <summary>
- /// __m128 _mm_div_ss (__m128 a, __m128 b); DIVSS xmm, xmm/m32
+ /// __m128 _mm_div_ss (__m128 a, __m128 b);
+ /// DIVSS xmm, xmm/m32
/// </summary>
public static Vector128<float> DivideScalar(Vector128<float> left, Vector128<float> right) => DivideScalar(left, right);
/// <summary>
- /// __m128 _mm_loadu_ps (float const* mem_address); MOVUPS xmm, m128
+ /// __m128 _mm_loadu_ps (float const* mem_address);
+ /// MOVUPS xmm, m128
/// </summary>
public static unsafe Vector128<float> LoadVector128(float* address) => LoadVector128(address);
/// <summary>
- /// __m128 _mm_load_ss (float const* mem_address); MOVSS xmm, m32
+ /// __m128 _mm_load_ss (float const* mem_address);
+ /// MOVSS xmm, m32
/// </summary>
public static unsafe Vector128<float> LoadScalarVector128(float* address) => LoadScalarVector128(address);
/// <summary>
- /// __m128 _mm_load_ps (float const* mem_address); MOVAPS xmm, m128
+ /// __m128 _mm_load_ps (float const* mem_address);
+ /// MOVAPS xmm, m128
/// </summary>
public static unsafe Vector128<float> LoadAlignedVector128(float* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128 _mm_loadh_pi (__m128 a, __m64 const* mem_addr); MOVHPS xmm, m64
+ /// __m128 _mm_loadh_pi (__m128 a, __m64 const* mem_addr);
+ /// MOVHPS xmm, m64
/// </summary>
public static unsafe Vector128<float> LoadHigh(Vector128<float> lower, float* address) => LoadHigh(lower, address);
/// <summary>
- /// __m128 _mm_loadl_pi (__m128 a, __m64 const* mem_addr); MOVLPS xmm, m64
+ /// __m128 _mm_loadl_pi (__m128 a, __m64 const* mem_addr);
+ /// MOVLPS xmm, m64
/// </summary>
public static unsafe Vector128<float> LoadLow(Vector128<float> upper, float* address) => LoadLow(upper, address);
/// <summary>
- /// __m128 _mm_max_ps (__m128 a, __m128 b); MAXPS xmm, xmm/m128
+ /// __m128 _mm_max_ps (__m128 a, __m128 b);
+ /// MAXPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Max(Vector128<float> left, Vector128<float> right) => Max(left, right);
/// <summary>
- /// __m128 _mm_max_ss (__m128 a, __m128 b); MAXSS xmm, xmm/m32
+ /// __m128 _mm_max_ss (__m128 a, __m128 b);
+ /// MAXSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MaxScalar(Vector128<float> left, Vector128<float> right) => MaxScalar(left, right);
/// <summary>
- /// __m128 _mm_min_ps (__m128 a, __m128 b); MINPS xmm, xmm/m128
+ /// __m128 _mm_min_ps (__m128 a, __m128 b);
+ /// MINPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Min(Vector128<float> left, Vector128<float> right) => Min(left, right);
/// <summary>
- /// __m128 _mm_min_ss (__m128 a, __m128 b); MINSS xmm, xmm/m32
+ /// __m128 _mm_min_ss (__m128 a, __m128 b);
+ /// MINSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MinScalar(Vector128<float> left, Vector128<float> right) => MinScalar(left, right);
/// <summary>
- /// __m128 _mm_move_ss (__m128 a, __m128 b); MOVSS xmm, xmm
+ /// __m128 _mm_move_ss (__m128 a, __m128 b);
+ /// MOVSS xmm, xmm
/// </summary>
public static Vector128<float> MoveScalar(Vector128<float> upper, Vector128<float> value) => MoveScalar(upper, value);
/// <summary>
- /// __m128 _mm_movehl_ps (__m128 a, __m128 b); MOVHLPS xmm, xmm
+ /// __m128 _mm_movehl_ps (__m128 a, __m128 b);
+ /// MOVHLPS xmm, xmm
/// </summary>
public static Vector128<float> MoveHighToLow(Vector128<float> left, Vector128<float> right) => MoveHighToLow(left, right);
/// <summary>
- /// __m128 _mm_movelh_ps (__m128 a, __m128 b); MOVLHPS xmm, xmm
+ /// __m128 _mm_movelh_ps (__m128 a, __m128 b);
+ /// MOVLHPS xmm, xmm
/// </summary>
public static Vector128<float> MoveLowToHigh(Vector128<float> left, Vector128<float> right) => MoveLowToHigh(left, right);
/// <summary>
- /// int _mm_movemask_ps (__m128 a); MOVMSKPS reg, xmm
+ /// int _mm_movemask_ps (__m128 a);
+ /// MOVMSKPS reg, xmm
/// </summary>
public static int MoveMask(Vector128<float> value) => MoveMask(value);
/// <summary>
- /// __m128 _mm_mul_ps (__m128 a, __m128 b); MULPS xmm, xmm/m128
+ /// __m128 _mm_mul_ps (__m128 a, __m128 b);
+ /// MULPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Multiply(Vector128<float> left, Vector128<float> right) => Multiply(left, right);
/// <summary>
- /// __m128 _mm_mul_ss (__m128 a, __m128 b); MULPS xmm, xmm/m32
+ /// __m128 _mm_mul_ss (__m128 a, __m128 b);
+ /// MULSS xmm, xmm/m32
/// </summary>
public static Vector128<float> MultiplyScalar(Vector128<float> left, Vector128<float> right) => MultiplyScalar(left, right);
/// <summary>
- /// __m128 _mm_or_ps (__m128 a, __m128 b); ORPS xmm, xmm/m128
+ /// __m128 _mm_or_ps (__m128 a, __m128 b);
+ /// ORPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Or(Vector128<float> left, Vector128<float> right) => Or(left, right);
/// <summary>
- /// __m128 _mm_rcp_ps (__m128 a); RCPPS xmm, xmm/m128
+ /// __m128 _mm_rcp_ps (__m128 a);
+ /// RCPPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Reciprocal(Vector128<float> value) => Reciprocal(value);
/// <summary>
- /// __m128 _mm_rcp_ss (__m128 a); RCPSS xmm, xmm/m32
+ /// __m128 _mm_rcp_ss (__m128 a);
+ /// RCPSS xmm, xmm/m32
/// </summary>
public static Vector128<float> ReciprocalScalar(Vector128<float> value) => ReciprocalScalar(value);
/// <summary>
- /// __m128 _mm_rcp_ss (__m128 a, __m128 b); RCPSS xmm, xmm/m32
+ /// __m128 _mm_rcp_ss (__m128 a, __m128 b);
+ /// RCPSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> ReciprocalScalar(Vector128<float> upper, Vector128<float> value) => ReciprocalScalar(upper, value);
/// <summary>
- /// __m128 _mm_rsqrt_ps (__m128 a); RSQRTPS xmm, xmm/m128
+ /// __m128 _mm_rsqrt_ps (__m128 a);
+ /// RSQRTPS xmm, xmm/m128
/// </summary>
public static Vector128<float> ReciprocalSqrt(Vector128<float> value) => ReciprocalSqrt(value);
/// <summary>
- /// __m128 _mm_rsqrt_ss (__m128 a); RSQRTSS xmm, xmm/m32
+ /// __m128 _mm_rsqrt_ss (__m128 a);
+ /// RSQRTSS xmm, xmm/m32
/// </summary>
public static Vector128<float> ReciprocalSqrtScalar(Vector128<float> value) => ReciprocalSqrtScalar(value);
/// <summary>
- /// __m128 _mm_rsqrt_ss (__m128 a, __m128 b); RSQRTSS xmm, xmm/m32
+ /// __m128 _mm_rsqrt_ss (__m128 a, __m128 b);
+ /// RSQRTSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> ReciprocalSqrtScalar(Vector128<float> upper, Vector128<float> value) => ReciprocalSqrtScalar(upper, value);
@@ -375,27 +446,36 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<float> SetVector128(float e3, float e2, float e1, float e0) => SetVector128(e3, e2, e1, e0);
/// <summary>
- /// __m128 _mm_set_ss (float a); HELPER
+ /// __m128 _mm_set_ss (float a);
+ /// HELPER
/// </summary>
public static Vector128<float> SetScalarVector128(float value) => SetScalarVector128(value);
/// <summary>
- /// __m128 _mm_set1_ps (float a); HELPER
+ /// __m128 _mm_set1_ps (float a);
+ /// HELPER
/// </summary>
public static Vector128<float> SetAllVector128(float value) => SetAllVector128(value);
/// <summary>
- /// __m128d _mm_setzero_ps (void); HELPER - XORPS
+ /// __m128 _mm_setzero_ps (void);
+ /// HELPER - XORPS
/// </summary>
public static Vector128<float> SetZeroVector128() => SetZeroVector128();
/// <summary>
- /// __m128 _mm_castpd_ps (__m128d a); HELPER - No Codegen
- /// __m128i _mm_castpd_si128 (__m128d a); HELPER - No Codegen
- /// __m128d _mm_castps_pd (__m128 a); HELPER - No Codegen
- /// __m128i _mm_castps_si128 (__m128 a); HELPER - No Codegen
- /// __m128d _mm_castsi128_pd (__m128i a); HELPER - No Codegen
- /// __m128 _mm_castsi128_ps (__m128i a); HELPER - No Codegen
+ /// __m128 _mm_castpd_ps (__m128d a);
+ /// HELPER - No Codegen
+ /// __m128i _mm_castpd_si128 (__m128d a);
+ /// HELPER - No Codegen
+ /// __m128d _mm_castps_pd (__m128 a);
+ /// HELPER - No Codegen
+ /// __m128i _mm_castps_si128 (__m128 a);
+ /// HELPER - No Codegen
+ /// __m128d _mm_castsi128_pd (__m128i a);
+ /// HELPER - No Codegen
+ /// __m128 _mm_castsi128_ps (__m128i a);
+ /// HELPER - No Codegen
/// </summary>
public static Vector128<U> StaticCast<T, U>(Vector128<T> value) where T : struct where U : struct
{
@@ -405,78 +485,93 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m128 _mm_shuffle_ps (__m128 a, __m128 b, unsigned int control); SHUFPS xmm, xmm/m128, imm8
+ /// __m128 _mm_shuffle_ps (__m128 a, __m128 b, unsigned int control);
+ /// SHUFPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Shuffle(Vector128<float> left, Vector128<float> right, byte control) => Shuffle(left, right, control);
/// <summary>
- /// __m128 _mm_sqrt_ps (__m128 a); SQRTPS xmm, xmm/m128
+ /// __m128 _mm_sqrt_ps (__m128 a);
+ /// SQRTPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Sqrt(Vector128<float> value) => Sqrt(value);
/// <summary>
- /// __m128 _mm_sqrt_ss (__m128 a); SQRTSS xmm, xmm/m32
+ /// __m128 _mm_sqrt_ss (__m128 a);
+ /// SQRTSS xmm, xmm/m32
/// </summary>
public static Vector128<float> SqrtScalar(Vector128<float> value) => SqrtScalar(value);
/// <summary>
- /// __m128 _mm_sqrt_ss (__m128 a, __m128 b); SQRTSS xmm, xmm/m32
+ /// __m128 _mm_sqrt_ss (__m128 a, __m128 b);
+ /// SQRTSS xmm, xmm/m32
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> SqrtScalar(Vector128<float> upper, Vector128<float> value) => SqrtScalar(upper, value);
/// <summary>
- /// void _mm_store_ps (float* mem_addr, __m128 a); MOVAPS m128, xmm
+ /// void _mm_store_ps (float* mem_addr, __m128 a);
+ /// MOVAPS m128, xmm
/// </summary>
public static unsafe void StoreAligned(float* address, Vector128<float> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_stream_ps (float* mem_addr, __m128 a); MOVNTPS m128, xmm
+ /// void _mm_stream_ps (float* mem_addr, __m128 a);
+ /// MOVNTPS m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(float* address, Vector128<float> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_storeu_ps (float* mem_addr, __m128 a); MOVUPS m128, xmm
+ /// void _mm_storeu_ps (float* mem_addr, __m128 a);
+ /// MOVUPS m128, xmm
/// </summary>
public static unsafe void Store(float* address, Vector128<float> source) => Store(address, source);
/// <summary>
- /// void _mm_store_ss (float* mem_addr, __m128 a); MOVSS m32, xmm
+ /// void _mm_store_ss (float* mem_addr, __m128 a);
+ /// MOVSS m32, xmm
/// </summary>
public static unsafe void StoreScalar(float* address, Vector128<float> source) => StoreScalar(address, source);
/// <summary>
- /// void _mm_storeh_pi (__m64* mem_addr, __m128 a); MOVHPS m64, xmm
+ /// void _mm_storeh_pi (__m64* mem_addr, __m128 a);
+ /// MOVHPS m64, xmm
/// </summary>
public static unsafe void StoreHigh(float* address, Vector128<float> source) => StoreHigh(address, source);
/// <summary>
- /// void _mm_storel_pi (__m64* mem_addr, __m128 a); MOVLPS m64, xmm
+ /// void _mm_storel_pi (__m64* mem_addr, __m128 a);
+ /// MOVLPS m64, xmm
/// </summary>
public static unsafe void StoreLow(float* address, Vector128<float> source) => StoreLow(address, source);
/// <summary>
- /// __m128d _mm_sub_ps (__m128d a, __m128d b); SUBPS xmm, xmm/m128
+ /// __m128 _mm_sub_ps (__m128 a, __m128 b);
+ /// SUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Subtract(Vector128<float> left, Vector128<float> right) => Subtract(left, right);
/// <summary>
- /// __m128 _mm_sub_ss (__m128 a, __m128 b); SUBSS xmm, xmm/m32
+ /// __m128 _mm_sub_ss (__m128 a, __m128 b);
+ /// SUBSS xmm, xmm/m32
/// </summary>
public static Vector128<float> SubtractScalar(Vector128<float> left, Vector128<float> right) => SubtractScalar(left, right);
/// <summary>
- /// __m128 _mm_unpackhi_ps (__m128 a, __m128 b); UNPCKHPS xmm, xmm/m128
+ /// __m128 _mm_unpackhi_ps (__m128 a, __m128 b);
+ /// UNPCKHPS xmm, xmm/m128
/// </summary>
public static Vector128<float> UnpackHigh(Vector128<float> left, Vector128<float> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128 _mm_unpacklo_ps (__m128 a, __m128 b); UNPCKLPS xmm, xmm/m128
+ /// __m128 _mm_unpacklo_ps (__m128 a, __m128 b);
+ /// UNPCKLPS xmm, xmm/m128
/// </summary>
public static Vector128<float> UnpackLow(Vector128<float> left, Vector128<float> right) => UnpackLow(left, right);
/// <summary>
- /// __m128 _mm_xor_ps (__m128 a, __m128 b); XORPS xmm, xmm/m128
+ /// __m128 _mm_xor_ps (__m128 a, __m128 b);
+ /// XORPS xmm, xmm/m128
/// </summary>
public static Vector128<float> Xor(Vector128<float> left, Vector128<float> right) => Xor(left, right);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.PlatformNotSupported.cs
index f999418370..16acc5912f 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.PlatformNotSupported.cs
@@ -16,1408 +16,1734 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128i _mm_add_epi8 (__m128i a, __m128i b); PADDB xmm, xmm/m128
+ /// __m128i _mm_add_epi8 (__m128i a, __m128i b);
+ /// PADDB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Add(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi8 (__m128i a, __m128i b); PADDB xmm, xmm/m128
+ /// __m128i _mm_add_epi8 (__m128i a, __m128i b);
+ /// PADDB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Add(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi16 (__m128i a, __m128i b); PADDW xmm, xmm/m128
+ /// __m128i _mm_add_epi16 (__m128i a, __m128i b);
+ /// PADDW xmm, xmm/m128
/// </summary>
public static Vector128<short> Add(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi16 (__m128i a, __m128i b); PADDW xmm, xmm/m128
+ /// __m128i _mm_add_epi16 (__m128i a, __m128i b);
+ /// PADDW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Add(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi32 (__m128i a, __m128i b); PADDD xmm, xmm/m128
+ /// __m128i _mm_add_epi32 (__m128i a, __m128i b);
+ /// PADDD xmm, xmm/m128
/// </summary>
public static Vector128<int> Add(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi32 (__m128i a, __m128i b); PADDD xmm, xmm/m128
+ /// __m128i _mm_add_epi32 (__m128i a, __m128i b);
+ /// PADDD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Add(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi64 (__m128i a, __m128i b); PADDQ xmm, xmm/m128
+ /// __m128i _mm_add_epi64 (__m128i a, __m128i b);
+ /// PADDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Add(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_add_epi64 (__m128i a, __m128i b); PADDQ xmm, xmm/m128
+ /// __m128i _mm_add_epi64 (__m128i a, __m128i b);
+ /// PADDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Add(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_add_pd (__m128d a, __m128d b); ADDPD xmm, xmm/m128
+ /// __m128d _mm_add_pd (__m128d a, __m128d b);
+ /// ADDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Add(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_add_sd (__m128d a, __m128d b); ADDSD xmm, xmm/m64
+ /// __m128d _mm_add_sd (__m128d a, __m128d b);
+ /// ADDSD xmm, xmm/m64
/// </summary>
public static Vector128<double> AddScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_adds_epi8 (__m128i a, __m128i b); PADDSB xmm, xmm/m128
+ /// __m128i _mm_adds_epi8 (__m128i a, __m128i b);
+ /// PADDSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_adds_epu8 (__m128i a, __m128i b); PADDUSB xmm, xmm/m128
+ /// __m128i _mm_adds_epu8 (__m128i a, __m128i b);
+ /// PADDUSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_adds_epi16 (__m128i a, __m128i b); PADDSW xmm, xmm/m128
+ /// __m128i _mm_adds_epi16 (__m128i a, __m128i b);
+ /// PADDSW xmm, xmm/m128
/// </summary>
public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_adds_epu16 (__m128i a, __m128i b); PADDUSW xmm, xmm/m128
+ /// __m128i _mm_adds_epu16 (__m128i a, __m128i b);
+ /// PADDUSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<byte> And(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> And(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<short> And(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<ushort> And(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<int> And(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<uint> And(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<long> And(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<ulong> And(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_and_pd (__m128d a, __m128d b); ANDPD xmm, xmm/m128
+ /// __m128d _mm_and_pd (__m128d a, __m128d b);
+ /// ANDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> And(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<byte> AndNot(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> AndNot(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<short> AndNot(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<ushort> AndNot(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<int> AndNot(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<uint> AndNot(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<long> AndNot(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<ulong> AndNot(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_andnot_pd (__m128d a, __m128d b); ADDNPD xmm, xmm/m128
+ /// __m128d _mm_andnot_pd (__m128d a, __m128d b);
+ /// ANDNPD xmm, xmm/m128
/// </summary>
public static Vector128<double> AndNot(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_avg_epu8 (__m128i a, __m128i b); PAVGB xmm, xmm/m128
+ /// __m128i _mm_avg_epu8 (__m128i a, __m128i b);
+ /// PAVGB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Average(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_avg_epu16 (__m128i a, __m128i b); PAVGW xmm, xmm/m128
+ /// __m128i _mm_avg_epu16 (__m128i a, __m128i b);
+ /// PAVGW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Average(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b); PCMPEQB xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b);
+ /// PCMPEQB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareEqual(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b); PCMPEQB xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b);
+ /// PCMPEQB xmm, xmm/m128
/// </summary>
public static Vector128<byte> CompareEqual(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b); PCMPEQW xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b);
+ /// PCMPEQW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareEqual(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b); PCMPEQW xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b);
+ /// PCMPEQW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> CompareEqual(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b); PCMPEQD xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b);
+ /// PCMPEQD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareEqual(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b); PCMPEQD xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b);
+ /// PCMPEQD xmm, xmm/m128
/// </summary>
public static Vector128<uint> CompareEqual(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpeq_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(0)
+ /// __m128d _mm_cmpeq_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(0)
/// </summary>
public static Vector128<double> CompareEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comieq_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comieq_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareEqualOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomieq_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomieq_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpeq_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(0)
+ /// __m128d _mm_cmpeq_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(0)
/// </summary>
public static Vector128<double> CompareEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b); PCMPGTB xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b);
+ /// PCMPGTB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareGreaterThan(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b); PCMPGTW xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b);
+ /// PCMPGTW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareGreaterThan(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpgt_epi32 (__m128i a, __m128i b); PCMPGTD xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi32 (__m128i a, __m128i b);
+ /// PCMPGTD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareGreaterThan(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpgt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(6)
+ /// __m128d _mm_cmpgt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<double> CompareGreaterThan(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comigt_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comigt_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomigt_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomigt_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpgt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(6)
+ /// __m128d _mm_cmpgt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(6)
/// </summary>
public static Vector128<double> CompareGreaterThanScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpge_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(5)
+ /// __m128d _mm_cmpge_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<double> CompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comige_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comige_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrEqualOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomige_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomige_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpge_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(5)
+ /// __m128d _mm_cmpge_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(5)
/// </summary>
public static Vector128<double> CompareGreaterThanOrEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmplt_epi8 (__m128i a, __m128i b); PCMPGTB xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi8 (__m128i a, __m128i b);
+ /// PCMPGTB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareLessThan(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmplt_epi16 (__m128i a, __m128i b); PCMPGTW xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi16 (__m128i a, __m128i b);
+ /// PCMPGTW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareLessThan(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmplt_epi32 (__m128i a, __m128i b); PCMPGTD xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi32 (__m128i a, __m128i b);
+ /// PCMPGTD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareLessThan(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmplt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(1)
+ /// __m128d _mm_cmplt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<double> CompareLessThan(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comilt_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comilt_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomilt_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomilt_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmplt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(1)
+ /// __m128d _mm_cmplt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(1)
/// </summary>
public static Vector128<double> CompareLessThanScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmple_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(2)
+ /// __m128d _mm_cmple_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<double> CompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comile_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comile_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrEqualOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomile_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomile_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmple_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(2)
+ /// __m128d _mm_cmple_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(2)
/// </summary>
public static Vector128<double> CompareLessThanOrEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpneq_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_cmpneq_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<double> CompareNotEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_comineq_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comineq_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareNotEqualOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_ucomineq_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomineq_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareNotEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpneq_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(4)
+ /// __m128d _mm_cmpneq_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(4)
/// </summary>
public static Vector128<double> CompareNotEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpngt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(2)
+ /// __m128d _mm_cmpngt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<double> CompareNotGreaterThan(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpngt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(2)
+ /// __m128d _mm_cmpngt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(2)
/// </summary>
public static Vector128<double> CompareNotGreaterThanScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnge_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(1)
+ /// __m128d _mm_cmpnge_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<double> CompareNotGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnge_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(1)
+ /// __m128d _mm_cmpnge_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(1)
/// </summary>
public static Vector128<double> CompareNotGreaterThanOrEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnlt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(5)
+ /// __m128d _mm_cmpnlt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<double> CompareNotLessThan(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnlt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(5)
+ /// __m128d _mm_cmpnlt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(5)
/// </summary>
public static Vector128<double> CompareNotLessThanScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnle_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(6)
+ /// __m128d _mm_cmpnle_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<double> CompareNotLessThanOrEqual(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpnle_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(6)
+ /// __m128d _mm_cmpnle_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(6)
/// </summary>
public static Vector128<double> CompareNotLessThanOrEqualScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpord_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(7)
+ /// __m128d _mm_cmpord_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(7)
/// </summary>
public static Vector128<double> CompareOrdered(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpord_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(7)
+ /// __m128d _mm_cmpord_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(7)
/// </summary>
public static Vector128<double> CompareOrderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpunord_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(3)
+ /// __m128d _mm_cmpunord_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(3)
/// </summary>
public static Vector128<double> CompareUnordered(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cmpunord_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(3)
+ /// __m128d _mm_cmpunord_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(3)
/// </summary>
public static Vector128<double> CompareUnorderedScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtps_epi32 (__m128 a); CVTPS2DQ xmm, xmm/m128
+ /// __m128i _mm_cvtps_epi32 (__m128 a);
+ /// CVTPS2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtpd_epi32 (__m128d a); CVTPD2DQ xmm, xmm/m128
+ /// __m128i _mm_cvtpd_epi32 (__m128d a);
+ /// CVTPD2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cvtepi32_ps (__m128i a); CVTDQ2PS xmm, xmm/m128
+ /// __m128 _mm_cvtepi32_ps (__m128i a);
+ /// CVTDQ2PS xmm, xmm/m128
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cvtpd_ps (__m128d a); CVTPD2PS xmm, xmm/m128
+ /// __m128 _mm_cvtpd_ps (__m128d a);
+ /// CVTPD2PS xmm, xmm/m128
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cvtepi32_pd (__m128i a); CVTDQ2PD xmm, xmm/m128
+ /// __m128d _mm_cvtepi32_pd (__m128i a);
+ /// CVTDQ2PD xmm, xmm/m128
/// </summary>
public static Vector128<double> ConvertToVector128Double(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cvtps_pd (__m128 a); CVTPS2PD xmm, xmm/m128
+ /// __m128d _mm_cvtps_pd (__m128 a);
+ /// CVTPS2PD xmm, xmm/m128
/// </summary>
public static Vector128<double> ConvertToVector128Double(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// double _mm_cvtsd_f64(__m128d a); HELPER: MOVSD
+ /// double _mm_cvtsd_f64(__m128d a);
+ /// HELPER: MOVSD
/// </summary>
public static double ConvertToDouble(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvtsd_si32 (__m128d a); CVTSD2SI r32, xmm/m64
+ /// int _mm_cvtsd_si32 (__m128d a);
+ /// CVTSD2SI r32, xmm/m64
/// </summary>
public static int ConvertToInt32(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvtsi128_si32 (__m128i a); MOVD reg/m32, xmm
+ /// int _mm_cvtsi128_si32 (__m128i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static int ConvertToInt32(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvtsd_si64 (__m128d a); CVTSD2SI r64, xmm/m64
+ /// __int64 _mm_cvtsd_si64 (__m128d a);
+ /// CVTSD2SI r64, xmm/m64
/// </summary>
public static long ConvertToInt64(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvtsi128_si64 (__m128i a); MOVQ reg/m64, xmm
+ /// __int64 _mm_cvtsi128_si64 (__m128i a);
+ /// MOVQ reg/m64, xmm
/// </summary>
public static long ConvertToInt64(Vector128<long> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvtsi128_si32 (__m128i a); MOVD reg/m32, xmm
+ /// int _mm_cvtsi128_si32 (__m128i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static uint ConvertToUInt32(Vector128<uint> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvtsi128_si64 (__m128i a); MOVQ reg/m64, xmm
+ /// __int64 _mm_cvtsi128_si64 (__m128i a);
+ /// MOVQ reg/m64, xmm
/// </summary>
public static ulong ConvertToUInt64(Vector128<ulong> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cvtsi32_sd (__m128d a, int b); CVTSI2SD xmm, reg/m64
+ /// __m128d _mm_cvtsi32_sd (__m128d a, int b);
+ /// CVTSI2SD xmm, reg/m32
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, int value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cvtsi64_sd (__m128d a, int b); CVTSI2SD xmm, reg/m64
+ /// __m128d _mm_cvtsi64_sd (__m128d a, __int64 b);
+ /// CVTSI2SD xmm, reg/m64
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, long value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_cvtss_sd (__m128d a, __m128 b); CVTSS2SD xmm, xmm/m32
+ /// __m128d _mm_cvtss_sd (__m128d a, __m128 b);
+ /// CVTSS2SD xmm, xmm/m32
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtsi32_si128 (int a); MOVD xmm, reg/m32
+ /// __m128i _mm_cvtsi32_si128 (int a);
+ /// MOVD xmm, reg/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32Scalar(int value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtsi64_si128 (__int64 a); MOVQ xmm, reg/m64
+ /// __m128i _mm_cvtsi64_si128 (__int64 a);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64Scalar(long value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_cvtsd_ss (__m128 a, __m128d b); CVTSD2SS xmm, xmm/m64
+ /// __m128 _mm_cvtsd_ss (__m128 a, __m128d b);
+ /// CVTSD2SS xmm, xmm/m64
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtsi32_si128 (int a); MOVD xmm, reg/m32
+ /// __m128i _mm_cvtsi32_si128 (int a);
+ /// MOVD xmm, reg/m32
/// </summary>
public static Vector128<uint> ConvertToVector128UInt32Scalar(uint value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtsi64_si128 (__int64 a); MOVQ xmm, reg/m64
+ /// __m128i _mm_cvtsi64_si128 (__int64 a);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static Vector128<ulong> ConvertToVector128UInt64Scalar(ulong value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvttps_epi32 (__m128 a); CVTTPS2DQ xmm, xmm/m128
+ /// __m128i _mm_cvttps_epi32 (__m128 a);
+ /// CVTTPS2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvttpd_epi32 (__m128d a); CVTTPD2DQ xmm, xmm/m128
+ /// __m128i _mm_cvttpd_epi32 (__m128d a);
+ /// CVTTPD2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cvttsd_si32 (__m128d a); CVTTSD2SI reg, xmm/m64
+ /// int _mm_cvttsd_si32 (__m128d a);
+ /// CVTTSD2SI reg, xmm/m64
/// </summary>
public static int ConvertToInt32WithTruncation(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_cvttsd_si64 (__m128d a); CVTTSD2SI reg, xmm/m64
+ /// __int64 _mm_cvttsd_si64 (__m128d a);
+ /// CVTTSD2SI reg, xmm/m64
/// </summary>
public static long ConvertToInt64WithTruncation(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_div_pd (__m128d a, __m128d b); DIVPD xmm, xmm/m128
+ /// __m128d _mm_div_pd (__m128d a, __m128d b);
+ /// DIVPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Divide(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_div_sd (__m128d a, __m128d b); DIVSD xmm, xmm/m64
+ /// __m128d _mm_div_sd (__m128d a, __m128d b);
+ /// DIVSD xmm, xmm/m64
/// </summary>
public static Vector128<double> DivideScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi16 (__m128i a, int immediate); PEXTRW reg, xmm, imm8
+ /// int _mm_extract_epi16 (__m128i a, int immediate);
+ /// PEXTRW reg, xmm, imm8
/// </summary>
public static short Extract(Vector128<short> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi16 (__m128i a, int immediate); PEXTRW reg, xmm, imm8
+ /// int _mm_extract_epi16 (__m128i a, int immediate);
+ /// PEXTRW reg, xmm, imm8
/// </summary>
public static ushort Extract(Vector128<ushort> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate); PINSRW xmm, reg/m16, imm8
+ /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate);
+ /// PINSRW xmm, reg/m16, imm8
/// </summary>
public static Vector128<short> Insert(Vector128<short> value, short data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate); PINSRW xmm, reg/m16, imm8
+ /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate);
+ /// PINSRW xmm, reg/m16, imm8
/// </summary>
public static Vector128<ushort> Insert(Vector128<ushort> value, ushort data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadVector128(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadVector128(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadVector128(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadVector128(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadVector128(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadVector128(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadVector128(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadVector128(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_loadu_pd (double const* mem_address); MOVUPD xmm, m128
+ /// __m128d _mm_loadu_pd (double const* mem_address);
+ /// MOVUPD xmm, m128
/// </summary>
public static unsafe Vector128<double> LoadVector128(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_load_sd (double const* mem_address); MOVSD xmm, m64
+ /// __m128d _mm_load_sd (double const* mem_address);
+ /// MOVSD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadScalar(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadAlignedVector128(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadAlignedVector128(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadAlignedVector128(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadAlignedVector128(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadAlignedVector128(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadAlignedVector128(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadAlignedVector128(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadAlignedVector128(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_load_pd (double const* mem_address); MOVAPD xmm, m128
+ /// __m128d _mm_load_pd (double const* mem_address);
+ /// MOVAPD xmm, m128
/// </summary>
public static unsafe Vector128<double> LoadAlignedVector128(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_loadh_pd (__m128d a, double const* mem_addr); MOVHPD xmm, m64
+ /// __m128d _mm_loadh_pd (__m128d a, double const* mem_addr);
+ /// MOVHPD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadHigh(Vector128<double> lower, double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_loadl_pd (__m128d a, double const* mem_addr); MOVLPD xmm, m64
+ /// __m128d _mm_loadl_pd (__m128d a, double const* mem_addr);
+ /// MOVLPD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadLow(Vector128<double> upper, double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<sbyte> LoadScalar(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<byte> LoadScalar(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<short> LoadScalar(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<ushort> LoadScalar(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<int> LoadScalar(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<uint> LoadScalar(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<long> LoadScalar(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<ulong> LoadScalar(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address); MASKMOVDQU xmm, xmm
+ /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address);
+ /// MASKMOVDQU xmm, xmm
/// </summary>
public static unsafe void MaskMove(Vector128<sbyte> source, Vector128<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address); MASKMOVDQU xmm, xmm
+ /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address);
+ /// MASKMOVDQU xmm, xmm
/// </summary>
public static unsafe void MaskMove(Vector128<byte> source, Vector128<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epu8 (__m128i a, __m128i b); PMAXUB xmm, xmm/m128
+ /// __m128i _mm_max_epu8 (__m128i a, __m128i b);
+ /// PMAXUB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Max(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epi16 (__m128i a, __m128i b); PMAXSW xmm, xmm/m128
+ /// __m128i _mm_max_epi16 (__m128i a, __m128i b);
+ /// PMAXSW xmm, xmm/m128
/// </summary>
public static Vector128<short> Max(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_max_pd (__m128d a, __m128d b); MAXPD xmm, xmm/m128
+ /// __m128d _mm_max_pd (__m128d a, __m128d b);
+ /// MAXPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Max(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_max_sd (__m128d a, __m128d b); MAXSD xmm, xmm/m64
+ /// __m128d _mm_max_sd (__m128d a, __m128d b);
+ /// MAXSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MaxScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epu8 (__m128i a, __m128i b); PMINUB xmm, xmm/m128
+ /// __m128i _mm_min_epu8 (__m128i a, __m128i b);
+ /// PMINUB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Min(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epi16 (__m128i a, __m128i b); PMINSW xmm, xmm/m128
+ /// __m128i _mm_min_epi16 (__m128i a, __m128i b);
+ /// PMINSW xmm, xmm/m128
/// </summary>
public static Vector128<short> Min(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_min_pd (__m128d a, __m128d b); MINPD xmm, xmm/m128
+ /// __m128d _mm_min_pd (__m128d a, __m128d b);
+ /// MINPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Min(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_min_sd (__m128d a, __m128d b); MINSD xmm, xmm/m64
+ /// __m128d _mm_min_sd (__m128d a, __m128d b);
+ /// MINSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MinScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_move_sd (__m128d a, __m128d b); MOVSD xmm, xmm
+ /// __m128d _mm_move_sd (__m128d a, __m128d b);
+ /// MOVSD xmm, xmm
/// </summary>
public static Vector128<double> MoveScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_movemask_epi8 (__m128i a); PMOVMSKB reg, xmm
+ /// int _mm_movemask_epi8 (__m128i a);
+ /// PMOVMSKB reg, xmm
/// </summary>
public static int MoveMask(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_movemask_epi8 (__m128i a); PMOVMSKB reg, xmm
+ /// int _mm_movemask_epi8 (__m128i a);
+ /// PMOVMSKB reg, xmm
/// </summary>
public static int MoveMask(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_movemask_pd (__m128d a); MOVMSKPD reg, xmm
+ /// int _mm_movemask_pd (__m128d a);
+ /// MOVMSKPD reg, xmm
/// </summary>
public static int MoveMask(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_move_epi64 (__m128i a); MOVQ xmm, xmm
+ /// __m128i _mm_move_epi64 (__m128i a);
+ /// MOVQ xmm, xmm
/// </summary>
public static Vector128<long> MoveScalar(Vector128<long> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_move_epi64 (__m128i a); MOVQ xmm, xmm
+ /// __m128i _mm_move_epi64 (__m128i a);
+ /// MOVQ xmm, xmm
/// </summary>
public static Vector128<ulong> MoveScalar(Vector128<ulong> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mul_epu32 (__m128i a, __m128i b); PMULUDQ xmm, xmm/m128
+ /// __m128i _mm_mul_epu32 (__m128i a, __m128i b);
+ /// PMULUDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Multiply(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_mul_pd (__m128d a, __m128d b); MULPD xmm, xmm/m128
+ /// __m128d _mm_mul_pd (__m128d a, __m128d b);
+ /// MULPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Multiply(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_mul_sd (__m128d a, __m128d b); MULSD xmm, xmm/m64
+ /// __m128d _mm_mul_sd (__m128d a, __m128d b);
+ /// MULSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mulhi_epi16 (__m128i a, __m128i b); PMULHW xmm, xmm/m128
+ /// __m128i _mm_mulhi_epi16 (__m128i a, __m128i b);
+ /// PMULHW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyHigh(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mulhi_epu16 (__m128i a, __m128i b); PMULHUW xmm, xmm/m128
+ /// __m128i _mm_mulhi_epu16 (__m128i a, __m128i b);
+ /// PMULHUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> MultiplyHigh(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_madd_epi16 (__m128i a, __m128i b); PMADDWD xmm, xmm/m128
+ /// __m128i _mm_madd_epi16 (__m128i a, __m128i b);
+ /// PMADDWD xmm, xmm/m128
/// </summary>
public static Vector128<int> MultiplyHorizontalAdd(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b); PMULLW xmm, xmm/m128
+ /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b);
+ /// PMULLW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyLow(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<byte> Or(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Or(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<short> Or(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Or(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<int> Or(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<uint> Or(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<long> Or(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Or(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_or_pd (__m128d a, __m128d b); ORPD xmm, xmm/m128
+ /// __m128d _mm_or_pd (__m128d a, __m128d b);
+ /// ORPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Or(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_packs_epi16 (__m128i a, __m128i b); PACKSSWB xmm, xmm/m128
+ /// __m128i _mm_packs_epi16 (__m128i a, __m128i b);
+ /// PACKSSWB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> PackSignedSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_packs_epi32 (__m128i a, __m128i b); PACKSSDW xmm, xmm/m128
+ /// __m128i _mm_packs_epi32 (__m128i a, __m128i b);
+ /// PACKSSDW xmm, xmm/m128
/// </summary>
public static Vector128<short> PackSignedSaturate(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_packus_epi16 (__m128i a, __m128i b); PACKUSWB xmm, xmm/m128
+ /// __m128i _mm_packus_epi16 (__m128i a, __m128i b);
+ /// PACKUSWB xmm, xmm/m128
/// </summary>
public static Vector128<byte> PackUnsignedSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector128<sbyte> SetVector128(sbyte e15, sbyte e14, sbyte e13, sbyte e12, sbyte e11, sbyte e10, sbyte e9, sbyte e8, sbyte e7, sbyte e6, sbyte e5, sbyte e4, sbyte e3, sbyte e2, sbyte e1, sbyte e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// __m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector128<byte> SetVector128(byte e15, byte e14, byte e13, byte e12, byte e11, byte e10, byte e9, byte e8, byte e7, byte e6, byte e5, byte e4, byte e3, byte e2, byte e1, byte e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector128<short> SetVector128(short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector128<ushort> SetVector128(ushort e7, ushort e6, ushort e5, ushort e4, ushort e3, ushort e2, ushort e1, ushort e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0); HELPER
+ /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector128<int> SetVector128(int e3, int e2, int e1, int e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0); HELPER
+ /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector128<uint> SetVector128(uint e3, uint e2, uint e1, uint e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0); HELPER
+ /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector128<long> SetVector128(long e1, long e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0); HELPER
+ /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector128<ulong> SetVector128(ulong e1, ulong e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_set_pd (double e1, double e0); HELPER
+ /// __m128d _mm_set_pd (double e1, double e0);
+ /// HELPER
/// </summary>
public static Vector128<double> SetVector128(double e1, double e0) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_set_sd (double a); HELPER
+ /// __m128d _mm_set_sd (double a);
+ /// HELPER
/// </summary>
public static Vector128<double> SetScalarVector128(double value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi8 (char a); HELPER
+ /// __m128i _mm_set1_epi8 (char a);
+ /// HELPER
/// </summary>
public static Vector128<byte> SetAllVector128(byte value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi8 (char a); HELPER
+ /// __m128i _mm_set1_epi8 (char a);
+ /// HELPER
/// </summary>
public static Vector128<sbyte> SetAllVector128(sbyte value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi16 (short a); HELPER
+ /// __m128i _mm_set1_epi16 (short a);
+ /// HELPER
/// </summary>
public static Vector128<short> SetAllVector128(short value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi16 (short a); HELPER
+ /// __m128i _mm_set1_epi16 (short a);
+ /// HELPER
/// </summary>
public static Vector128<ushort> SetAllVector128(ushort value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi32 (int a); HELPER
+ /// __m128i _mm_set1_epi32 (int a);
+ /// HELPER
/// </summary>
public static Vector128<int> SetAllVector128(int value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi32 (int a); HELPER
+ /// __m128i _mm_set1_epi32 (int a);
+ /// HELPER
/// </summary>
public static Vector128<uint> SetAllVector128(uint value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi64x (long long a); HELPER
+ /// __m128i _mm_set1_epi64x (long long a);
+ /// HELPER
/// </summary>
public static Vector128<long> SetAllVector128(long value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_set1_epi64x (long long a); HELPER
+ /// __m128i _mm_set1_epi64x (long long a);
+ /// HELPER
/// </summary>
public static Vector128<ulong> SetAllVector128(ulong value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_set1_pd (double a); HELPER
+ /// __m128d _mm_set1_pd (double a);
+ /// HELPER
/// </summary>
public static Vector128<double> SetAllVector128(double value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_setzero_si128 (); HELPER: PXOR
- /// __m128d _mm_setzero_pd (void); HELPER: XORPD
+ /// __m128i _mm_setzero_si128 ();
+ /// HELPER: PXOR
+ /// __m128d _mm_setzero_pd (void);
+ /// HELPER: XORPD
/// </summary>
public static Vector128<T> SetZeroVector128<T>() where T : struct { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sad_epu8 (__m128i a, __m128i b); PSADBW xmm, xmm/m128
+ /// __m128i _mm_sad_epu8 (__m128i a, __m128i b);
+ /// PSADBW xmm, xmm/m128
/// </summary>
public static Vector128<long> SumAbsoluteDifferences(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8
+ /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate);
+ /// PSHUFD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<int> Shuffle(Vector128<int> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8
+ /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate);
+ /// PSHUFD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<uint> Shuffle(Vector128<uint> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate); SHUFPD xmm, xmm/m128, imm8
+ /// __m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate);
+ /// SHUFPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Shuffle(Vector128<double> left, Vector128<double> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shufflehi_epi16 (__m128i a, int immediate); PSHUFHW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflehi_epi16 (__m128i a, int immediate);
+ /// PSHUFHW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<short> ShuffleHigh(Vector128<short> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shufflehi_epi16 (__m128i a, int control); PSHUFHW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflehi_epi16 (__m128i a, int control);
+ /// PSHUFHW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> ShuffleHigh(Vector128<ushort> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflelo_epi16 (__m128i a, int control);
+ /// PSHUFLW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<short> ShuffleLow(Vector128<short> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflelo_epi16 (__m128i a, int control);
+ /// PSHUFLW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> ShuffleLow(Vector128<ushort> value, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128
+ /// __m128i _mm_sll_epi16 (__m128i a, __m128i count);
+ /// PSLLW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftLeftLogical(Vector128<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128
+ /// __m128i _mm_sll_epi16 (__m128i a, __m128i count);
+ /// PSLLW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, Vector128<ushort> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128
+ /// __m128i _mm_sll_epi32 (__m128i a, __m128i count);
+ /// PSLLD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftLeftLogical(Vector128<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128
+ /// __m128i _mm_sll_epi32 (__m128i a, __m128i count);
+ /// PSLLD xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128
+ /// __m128i _mm_sll_epi64 (__m128i a, __m128i count);
+ /// PSLLQ xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftLeftLogical(Vector128<long> value, Vector128<long> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128
+ /// __m128i _mm_sll_epi64 (__m128i a, __m128i count);
+ /// PSLLQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8
+ /// __m128i _mm_slli_epi16 (__m128i a, int immediate);
+ /// PSLLW xmm, imm8
/// </summary>
public static Vector128<short> ShiftLeftLogical(Vector128<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8
+ /// __m128i _mm_slli_epi16 (__m128i a, int immediate);
+ /// PSLLW xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8
+ /// __m128i _mm_slli_epi32 (__m128i a, int immediate);
+ /// PSLLD xmm, imm8
/// </summary>
public static Vector128<int> ShiftLeftLogical(Vector128<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8
+ /// __m128i _mm_slli_epi32 (__m128i a, int immediate);
+ /// PSLLD xmm, imm8
/// </summary>
public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8
+ /// __m128i _mm_slli_epi64 (__m128i a, int immediate);
+ /// PSLLQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftLeftLogical(Vector128<long> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8
+ /// __m128i _mm_slli_epi64 (__m128i a, int immediate);
+ /// PSLLQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<sbyte> ShiftLeftLogical128BitLane(Vector128<sbyte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<byte> ShiftLeftLogical128BitLane(Vector128<byte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<short> ShiftLeftLogical128BitLane(Vector128<short> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftLeftLogical128BitLane(Vector128<ushort> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<int> ShiftLeftLogical128BitLane(Vector128<int> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<uint> ShiftLeftLogical128BitLane(Vector128<uint> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftLeftLogical128BitLane(Vector128<long> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftLeftLogical128BitLane(Vector128<ulong> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sra_epi16 (__m128i a, __m128i count); PSRAW xmm, xmm/m128
+ /// __m128i _mm_sra_epi16 (__m128i a, __m128i count);
+ /// PSRAW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sra_epi32 (__m128i a, __m128i count); PSRAD xmm, xmm/m128
+ /// __m128i _mm_sra_epi32 (__m128i a, __m128i count);
+ /// PSRAD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srai_epi16 (__m128i a, int immediate); PSRAW xmm, imm8
+ /// __m128i _mm_srai_epi16 (__m128i a, int immediate);
+ /// PSRAW xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srai_epi32 (__m128i a, int immediate); PSRAD xmm, imm8
+ /// __m128i _mm_srai_epi32 (__m128i a, int immediate);
+ /// PSRAD xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128
+ /// __m128i _mm_srl_epi16 (__m128i a, __m128i count);
+ /// PSRLW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftRightLogical(Vector128<short> value, Vector128<short> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128
+ /// __m128i _mm_srl_epi16 (__m128i a, __m128i count);
+ /// PSRLW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, Vector128<ushort> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128
+ /// __m128i _mm_srl_epi32 (__m128i a, __m128i count);
+ /// PSRLD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightLogical(Vector128<int> value, Vector128<int> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128
+ /// __m128i _mm_srl_epi32 (__m128i a, __m128i count);
+ /// PSRLD xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, Vector128<uint> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128
+ /// __m128i _mm_srl_epi64 (__m128i a, __m128i count);
+ /// PSRLQ xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftRightLogical(Vector128<long> value, Vector128<long> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128
+ /// __m128i _mm_srl_epi64 (__m128i a, __m128i count);
+ /// PSRLQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, Vector128<ulong> count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8
+ /// __m128i _mm_srli_epi16 (__m128i a, int immediate);
+ /// PSRLW xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightLogical(Vector128<short> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8
+ /// __m128i _mm_srli_epi16 (__m128i a, int immediate);
+ /// PSRLW xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8
+ /// __m128i _mm_srli_epi32 (__m128i a, int immediate);
+ /// PSRLD xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightLogical(Vector128<int> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8
+ /// __m128i _mm_srli_epi32 (__m128i a, int immediate);
+ /// PSRLD xmm, imm8
/// </summary>
public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8
+ /// __m128i _mm_srli_epi64 (__m128i a, int immediate);
+ /// PSRLQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftRightLogical(Vector128<long> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8
+ /// __m128i _mm_srli_epi64 (__m128i a, int immediate);
+ /// PSRLQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, byte count) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<sbyte> ShiftRightLogical128BitLane(Vector128<sbyte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<byte> ShiftRightLogical128BitLane(Vector128<byte> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightLogical128BitLane(Vector128<short> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftRightLogical128BitLane(Vector128<ushort> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightLogical128BitLane(Vector128<int> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<uint> ShiftRightLogical128BitLane(Vector128<uint> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftRightLogical128BitLane(Vector128<long> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftRightLogical128BitLane(Vector128<ulong> value, byte numBytes) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sqrt_pd (__m128d a); SQRTPD xmm, xmm/m128
+ /// __m128d _mm_sqrt_pd (__m128d a);
+ /// SQRTPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Sqrt(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sqrt_sd (__m128d a); SQRTSD xmm, xmm/64
+ /// __m128d _mm_sqrt_sd (__m128d a);
+ /// SQRTSD xmm, xmm/m64
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> SqrtScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sqrt_sd (__m128d a, __m128d b); SQRTSD xmm, xmm/64
+ /// __m128d _mm_sqrt_sd (__m128d a, __m128d b);
+ /// SQRTSD xmm, xmm/m64
/// </summary>
public static Vector128<double> SqrtScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_sd (double* mem_addr, __m128d a); MOVSD m64, xmm
+ /// void _mm_store_sd (double* mem_addr, __m128d a);
+ /// MOVSD m64, xmm
/// </summary>
public static unsafe void StoreScalar(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(sbyte* address, Vector128<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(byte* address, Vector128<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(short* address, Vector128<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(ushort* address, Vector128<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(int* address, Vector128<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(uint* address, Vector128<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(long* address, Vector128<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(ulong* address, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_store_pd (double* mem_addr, __m128d a); MOVAPD m128, xmm
+ /// void _mm_store_pd (double* mem_addr, __m128d a);
+ /// MOVAPD m128, xmm
/// </summary>
public static unsafe void StoreAligned(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(sbyte* address, Vector128<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(byte* address, Vector128<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(short* address, Vector128<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ushort* address, Vector128<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(int* address, Vector128<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(uint* address, Vector128<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(long* address, Vector128<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ulong* address, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_stream_pd (double* mem_addr, __m128d a); MOVNTPD m128, xmm
+ /// void _mm_stream_pd (double* mem_addr, __m128d a);
+ /// MOVNTPD m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(sbyte* address, Vector128<sbyte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(byte* address, Vector128<byte> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(short* address, Vector128<short> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(ushort* address, Vector128<ushort> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(int* address, Vector128<int> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(uint* address, Vector128<uint> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(long* address, Vector128<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(ulong* address, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeu_pd (double* mem_addr, __m128d a); MOVUPD m128, xmm
+ /// void _mm_storeu_pd (double* mem_addr, __m128d a);
+ /// MOVUPD m128, xmm
/// </summary>
public static unsafe void Store(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storeh_pd (double* mem_addr, __m128d a); MOVHPD m64, xmm
+ /// void _mm_storeh_pd (double* mem_addr, __m128d a);
+ /// MOVHPD m64, xmm
/// </summary>
public static unsafe void StoreHigh(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm
+ /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a);
+ /// MOVQ m64, xmm
/// </summary>
public static unsafe void StoreLow(long* address, Vector128<long> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm
+ /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a);
+ /// MOVQ m64, xmm
/// </summary>
public static unsafe void StoreLow(ulong* address, Vector128<ulong> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// void _mm_storel_pd (double* mem_addr, __m128d a); MOVLPD m64, xmm
+ /// void _mm_storel_pd (double* mem_addr, __m128d a);
+ /// MOVLPD m64, xmm
/// </summary>
public static unsafe void StoreLow(double* address, Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128
+ /// __m128i _mm_sub_epi8 (__m128i a, __m128i b);
+ /// PSUBB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Subtract(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128
+ /// __m128i _mm_sub_epi8 (__m128i a, __m128i b);
+ /// PSUBB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Subtract(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128
+ /// __m128i _mm_sub_epi16 (__m128i a, __m128i b);
+ /// PSUBW xmm, xmm/m128
/// </summary>
public static Vector128<short> Subtract(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128
+ /// __m128i _mm_sub_epi16 (__m128i a, __m128i b);
+ /// PSUBW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Subtract(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128
+ /// __m128i _mm_sub_epi32 (__m128i a, __m128i b);
+ /// PSUBD xmm, xmm/m128
/// </summary>
public static Vector128<int> Subtract(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128
+ /// __m128i _mm_sub_epi32 (__m128i a, __m128i b);
+ /// PSUBD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Subtract(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128
+ /// __m128i _mm_sub_epi64 (__m128i a, __m128i b);
+ /// PSUBQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Subtract(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128
+ /// __m128i _mm_sub_epi64 (__m128i a, __m128i b);
+ /// PSUBQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Subtract(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sub_pd (__m128d a, __m128d b); SUBPD xmm, xmm/m128
+ /// __m128d _mm_sub_pd (__m128d a, __m128d b);
+ /// SUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Subtract(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_sub_sd (__m128d a, __m128d b); SUBSD xmm, xmm/m64
+ /// __m128d _mm_sub_sd (__m128d a, __m128d b);
+ /// SUBSD xmm, xmm/m64
/// </summary>
public static Vector128<double> SubtractScalar(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_subs_epi8 (__m128i a, __m128i b); PSUBSB xmm, xmm/m128
+ /// __m128i _mm_subs_epi8 (__m128i a, __m128i b);
+ /// PSUBSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> SubtractSaturate(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_subs_epi16 (__m128i a, __m128i b); PSUBSW xmm, xmm/m128
+ /// __m128i _mm_subs_epi16 (__m128i a, __m128i b);
+ /// PSUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> SubtractSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_subs_epu8 (__m128i a, __m128i b); PSUBUSB xmm, xmm/m128
+ /// __m128i _mm_subs_epu8 (__m128i a, __m128i b);
+ /// PSUBUSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> SubtractSaturate(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_subs_epu16 (__m128i a, __m128i b); PSUBUSW xmm, xmm/m128
+ /// __m128i _mm_subs_epu16 (__m128i a, __m128i b);
+ /// PSUBUSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> SubtractSaturate(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b);
+ /// PUNPCKHBW xmm, xmm/m128
/// </summary>
public static Vector128<byte> UnpackHigh(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b);
+ /// PUNPCKHBW xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> UnpackHigh(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b);
+ /// PUNPCKHWD xmm, xmm/m128
/// </summary>
public static Vector128<short> UnpackHigh(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b);
+ /// PUNPCKHWD xmm, xmm/m128
 /// </summary>
public static Vector128<ushort> UnpackHigh(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b);
+ /// PUNPCKHDQ xmm, xmm/m128
/// </summary>
public static Vector128<int> UnpackHigh(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b);
+ /// PUNPCKHDQ xmm, xmm/m128
/// </summary>
public static Vector128<uint> UnpackHigh(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b);
+ /// PUNPCKHQDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> UnpackHigh(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b);
+ /// PUNPCKHQDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> UnpackHigh(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_unpackhi_pd (__m128d a, __m128d b); UNPCKHPD xmm, xmm/m128
+ /// __m128d _mm_unpackhi_pd (__m128d a, __m128d b);
+ /// UNPCKHPD xmm, xmm/m128
/// </summary>
public static Vector128<double> UnpackHigh(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b);
+ /// PUNPCKLBW xmm, xmm/m128
/// </summary>
public static Vector128<byte> UnpackLow(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b);
+ /// PUNPCKLBW xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> UnpackLow(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b);
+ /// PUNPCKLWD xmm, xmm/m128
/// </summary>
public static Vector128<short> UnpackLow(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b);
+ /// PUNPCKLWD xmm, xmm/m128
/// </summary>
public static Vector128<ushort> UnpackLow(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b);
+ /// PUNPCKLDQ xmm, xmm/m128
/// </summary>
public static Vector128<int> UnpackLow(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b);
+ /// PUNPCKLDQ xmm, xmm/m128
/// </summary>
public static Vector128<uint> UnpackLow(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b);
+ /// PUNPCKLQDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> UnpackLow(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b);
+ /// PUNPCKLQDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> UnpackLow(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_unpacklo_pd (__m128d a, __m128d b); UNPCKLPD xmm, xmm/m128
+ /// __m128d _mm_unpacklo_pd (__m128d a, __m128d b);
+ /// UNPCKLPD xmm, xmm/m128
/// </summary>
public static Vector128<double> UnpackLow(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<byte> Xor(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Xor(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<short> Xor(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Xor(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<int> Xor(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<uint> Xor(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<long> Xor(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Xor(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_xor_pd (__m128d a, __m128d b); XORPD xmm, xmm/m128
+ /// __m128d _mm_xor_pd (__m128d a, __m128d b);
+ /// XORPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Xor(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.cs
index f960ea7d64..cfc1bb6b13 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse2.cs
@@ -16,868 +16,1066 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128i _mm_add_epi8 (__m128i a, __m128i b); PADDB xmm, xmm/m128
+ /// __m128i _mm_add_epi8 (__m128i a, __m128i b);
+ /// PADDB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Add(Vector128<byte> left, Vector128<byte> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi8 (__m128i a, __m128i b); PADDB xmm, xmm/m128
+ /// __m128i _mm_add_epi8 (__m128i a, __m128i b);
+ /// PADDB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Add(Vector128<sbyte> left, Vector128<sbyte> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi16 (__m128i a, __m128i b); PADDW xmm, xmm/m128
+ /// __m128i _mm_add_epi16 (__m128i a, __m128i b);
+ /// PADDW xmm, xmm/m128
/// </summary>
public static Vector128<short> Add(Vector128<short> left, Vector128<short> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi16 (__m128i a, __m128i b); PADDW xmm, xmm/m128
+ /// __m128i _mm_add_epi16 (__m128i a, __m128i b);
+ /// PADDW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Add(Vector128<ushort> left, Vector128<ushort> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi32 (__m128i a, __m128i b); PADDD xmm, xmm/m128
+ /// __m128i _mm_add_epi32 (__m128i a, __m128i b);
+ /// PADDD xmm, xmm/m128
/// </summary>
public static Vector128<int> Add(Vector128<int> left, Vector128<int> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi32 (__m128i a, __m128i b); PADDD xmm, xmm/m128
+ /// __m128i _mm_add_epi32 (__m128i a, __m128i b);
+ /// PADDD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Add(Vector128<uint> left, Vector128<uint> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi64 (__m128i a, __m128i b); PADDQ xmm, xmm/m128
+ /// __m128i _mm_add_epi64 (__m128i a, __m128i b);
+ /// PADDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Add(Vector128<long> left, Vector128<long> right) => Add(left, right);
/// <summary>
- /// __m128i _mm_add_epi64 (__m128i a, __m128i b); PADDQ xmm, xmm/m128
+ /// __m128i _mm_add_epi64 (__m128i a, __m128i b);
+ /// PADDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Add(Vector128<ulong> left, Vector128<ulong> right) => Add(left, right);
/// <summary>
- /// __m128d _mm_add_pd (__m128d a, __m128d b); ADDPD xmm, xmm/m128
+ /// __m128d _mm_add_pd (__m128d a, __m128d b);
+ /// ADDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Add(Vector128<double> left, Vector128<double> right) => Add(left, right);
/// <summary>
- /// __m128d _mm_add_sd (__m128d a, __m128d b); ADDSD xmm, xmm/m64
+ /// __m128d _mm_add_sd (__m128d a, __m128d b);
+ /// ADDSD xmm, xmm/m64
/// </summary>
public static Vector128<double> AddScalar(Vector128<double> left, Vector128<double> right) => AddScalar(left, right);
/// <summary>
- /// __m128i _mm_adds_epi8 (__m128i a, __m128i b); PADDSB xmm, xmm/m128
+ /// __m128i _mm_adds_epi8 (__m128i a, __m128i b);
+ /// PADDSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => AddSaturate(left, right);
/// <summary>
- /// __m128i _mm_adds_epu8 (__m128i a, __m128i b); PADDUSB xmm, xmm/m128
+ /// __m128i _mm_adds_epu8 (__m128i a, __m128i b);
+ /// PADDUSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<byte> right) => AddSaturate(left, right);
/// <summary>
- /// __m128i _mm_adds_epi16 (__m128i a, __m128i b); PADDSW xmm, xmm/m128
+ /// __m128i _mm_adds_epi16 (__m128i a, __m128i b);
+ /// PADDSW xmm, xmm/m128
/// </summary>
public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<short> right) => AddSaturate(left, right);
/// <summary>
- /// __m128i _mm_adds_epu16 (__m128i a, __m128i b); PADDUSW xmm, xmm/m128
+ /// __m128i _mm_adds_epu16 (__m128i a, __m128i b);
+ /// PADDUSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<ushort> right) => AddSaturate(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<byte> And(Vector128<byte> left, Vector128<byte> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> And(Vector128<sbyte> left, Vector128<sbyte> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<short> And(Vector128<short> left, Vector128<short> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<ushort> And(Vector128<ushort> left, Vector128<ushort> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<int> And(Vector128<int> left, Vector128<int> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<uint> And(Vector128<uint> left, Vector128<uint> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<long> And(Vector128<long> left, Vector128<long> right) => And(left, right);
/// <summary>
- /// __m128i _mm_and_si128 (__m128i a, __m128i b); PAND xmm, xmm/m128
+ /// __m128i _mm_and_si128 (__m128i a, __m128i b);
+ /// PAND xmm, xmm/m128
/// </summary>
public static Vector128<ulong> And(Vector128<ulong> left, Vector128<ulong> right) => And(left, right);
/// <summary>
- /// __m128d _mm_and_pd (__m128d a, __m128d b); ANDPD xmm, xmm/m128
+ /// __m128d _mm_and_pd (__m128d a, __m128d b);
+ /// ANDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> And(Vector128<double> left, Vector128<double> right) => And(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<byte> AndNot(Vector128<byte> left, Vector128<byte> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> AndNot(Vector128<sbyte> left, Vector128<sbyte> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<short> AndNot(Vector128<short> left, Vector128<short> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<ushort> AndNot(Vector128<ushort> left, Vector128<ushort> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<int> AndNot(Vector128<int> left, Vector128<int> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<uint> AndNot(Vector128<uint> left, Vector128<uint> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<long> AndNot(Vector128<long> left, Vector128<long> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_andnot_si128 (__m128i a, __m128i b); PANDN xmm, xmm/m128
+ /// __m128i _mm_andnot_si128 (__m128i a, __m128i b);
+ /// PANDN xmm, xmm/m128
/// </summary>
public static Vector128<ulong> AndNot(Vector128<ulong> left, Vector128<ulong> right) => AndNot(left, right);
/// <summary>
- /// __m128d _mm_andnot_pd (__m128d a, __m128d b); ADDNPD xmm, xmm/m128
+ /// __m128d _mm_andnot_pd (__m128d a, __m128d b);
+ /// ANDNPD xmm, xmm/m128
/// </summary>
public static Vector128<double> AndNot(Vector128<double> left, Vector128<double> right) => AndNot(left, right);
/// <summary>
- /// __m128i _mm_avg_epu8 (__m128i a, __m128i b); PAVGB xmm, xmm/m128
+ /// __m128i _mm_avg_epu8 (__m128i a, __m128i b);
+ /// PAVGB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Average(Vector128<byte> left, Vector128<byte> right) => Average(left, right);
/// <summary>
- /// __m128i _mm_avg_epu16 (__m128i a, __m128i b); PAVGW xmm, xmm/m128
+ /// __m128i _mm_avg_epu16 (__m128i a, __m128i b);
+ /// PAVGW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Average(Vector128<ushort> left, Vector128<ushort> right) => Average(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b); PCMPEQB xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b);
+ /// PCMPEQB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b); PCMPEQB xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b);
+ /// PCMPEQB xmm, xmm/m128
/// </summary>
public static Vector128<byte> CompareEqual(Vector128<byte> left, Vector128<byte> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b); PCMPEQW xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b);
+ /// PCMPEQW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareEqual(Vector128<short> left, Vector128<short> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b); PCMPEQW xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b);
+ /// PCMPEQW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> CompareEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b); PCMPEQD xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b);
+ /// PCMPEQD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareEqual(Vector128<int> left, Vector128<int> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b); PCMPEQD xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b);
+ /// PCMPEQD xmm, xmm/m128
/// </summary>
public static Vector128<uint> CompareEqual(Vector128<uint> left, Vector128<uint> right) => CompareEqual(left, right);
/// <summary>
- /// __m128d _mm_cmpeq_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(0)
+ /// __m128d _mm_cmpeq_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(0)
/// </summary>
public static Vector128<double> CompareEqual(Vector128<double> left, Vector128<double> right) => CompareEqual(left, right);
/// <summary>
- /// int _mm_comieq_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comieq_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareEqualOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomieq_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomieq_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpeq_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(0)
+ /// __m128d _mm_cmpeq_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(0)
/// </summary>
public static Vector128<double> CompareEqualScalar(Vector128<double> left, Vector128<double> right) => CompareEqualScalar(left, right);
/// <summary>
- /// __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b); PCMPGTB xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b);
+ /// PCMPGTB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareGreaterThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b); PCMPGTW xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b);
+ /// PCMPGTW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareGreaterThan(Vector128<short> left, Vector128<short> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m128i _mm_cmpgt_epi32 (__m128i a, __m128i b); PCMPGTD xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi32 (__m128i a, __m128i b);
+ /// PCMPGTD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareGreaterThan(Vector128<int> left, Vector128<int> right) => CompareGreaterThan(left, right);
/// <summary>
- /// __m128d _mm_cmpgt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(6)
+ /// __m128d _mm_cmpgt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<double> CompareGreaterThan(Vector128<double> left, Vector128<double> right) => CompareGreaterThan(left, right);
/// <summary>
- /// int _mm_comigt_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comigt_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomigt_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomigt_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpgt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(6)
+ /// __m128d _mm_cmpgt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(6)
/// </summary>
public static Vector128<double> CompareGreaterThanScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpge_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(5)
+ /// __m128d _mm_cmpge_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<double> CompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqual(left, right);
/// <summary>
- /// int _mm_comige_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comige_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrEqualOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomige_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomige_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareGreaterThanOrEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpge_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(5)
+ /// __m128d _mm_cmpge_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(5)
/// </summary>
public static Vector128<double> CompareGreaterThanOrEqualScalar(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
- /// __m128i _mm_cmplt_epi8 (__m128i a, __m128i b); PCMPGTB xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi8 (__m128i a, __m128i b);
+ /// PCMPGTB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> CompareLessThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareLessThan(left, right);
/// <summary>
- /// __m128i _mm_cmplt_epi16 (__m128i a, __m128i b); PCMPGTW xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi16 (__m128i a, __m128i b);
+ /// PCMPGTW xmm, xmm/m128
/// </summary>
public static Vector128<short> CompareLessThan(Vector128<short> left, Vector128<short> right) => CompareLessThan(left, right);
/// <summary>
- /// __m128i _mm_cmplt_epi32 (__m128i a, __m128i b); PCMPGTD xmm, xmm/m128
+ /// __m128i _mm_cmplt_epi32 (__m128i a, __m128i b);
+ /// PCMPGTD xmm, xmm/m128
/// </summary>
public static Vector128<int> CompareLessThan(Vector128<int> left, Vector128<int> right) => CompareLessThan(left, right);
/// <summary>
- /// __m128d _mm_cmplt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(1)
+ /// __m128d _mm_cmplt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<double> CompareLessThan(Vector128<double> left, Vector128<double> right) => CompareLessThan(left, right);
/// <summary>
- /// int _mm_comilt_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comilt_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomilt_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomilt_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmplt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(1)
+ /// __m128d _mm_cmplt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(1)
/// </summary>
public static Vector128<double> CompareLessThanScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanScalar(left, right);
/// <summary>
- /// __m128d _mm_cmple_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(2)
+ /// __m128d _mm_cmple_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<double> CompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqual(left, right);
/// <summary>
- /// int _mm_comile_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comile_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrEqualOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomile_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomile_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareLessThanOrEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmple_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(2)
+ /// __m128d _mm_cmple_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(2)
/// </summary>
public static Vector128<double> CompareLessThanOrEqualScalar(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqualScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpneq_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_cmpneq_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<double> CompareNotEqual(Vector128<double> left, Vector128<double> right) => CompareNotEqual(left, right);
/// <summary>
- /// int _mm_comineq_sd (__m128d a, __m128d b); COMISS xmm, xmm/m64
+ /// int _mm_comineq_sd (__m128d a, __m128d b);
+ /// COMISD xmm, xmm/m64
/// </summary>
public static bool CompareNotEqualOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareNotEqualOrderedScalar(left, right);
/// <summary>
- /// int _mm_ucomineq_sd (__m128d a, __m128d b); UCOMISS xmm, xmm/m64
+ /// int _mm_ucomineq_sd (__m128d a, __m128d b);
+ /// UCOMISD xmm, xmm/m64
/// </summary>
public static bool CompareNotEqualUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareNotEqualUnorderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpneq_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(4)
+ /// __m128d _mm_cmpneq_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(4)
/// </summary>
public static Vector128<double> CompareNotEqualScalar(Vector128<double> left, Vector128<double> right) => CompareNotEqualScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpngt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(2)
+ /// __m128d _mm_cmpngt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(2)
/// </summary>
public static Vector128<double> CompareNotGreaterThan(Vector128<double> left, Vector128<double> right) => CompareNotGreaterThan(left, right);
/// <summary>
- /// __m128d _mm_cmpngt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(2)
+ /// __m128d _mm_cmpngt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(2)
/// </summary>
public static Vector128<double> CompareNotGreaterThanScalar(Vector128<double> left, Vector128<double> right) => CompareNotGreaterThanScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpnge_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(1)
+ /// __m128d _mm_cmpnge_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(1)
/// </summary>
public static Vector128<double> CompareNotGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareNotGreaterThanOrEqual(left, right);
/// <summary>
- /// __m128d _mm_cmpnge_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(1)
+ /// __m128d _mm_cmpnge_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(1)
/// </summary>
public static Vector128<double> CompareNotGreaterThanOrEqualScalar(Vector128<double> left, Vector128<double> right) => CompareNotGreaterThanOrEqualScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpnlt_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(5)
+ /// __m128d _mm_cmpnlt_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(5)
/// </summary>
public static Vector128<double> CompareNotLessThan(Vector128<double> left, Vector128<double> right) => CompareNotLessThan(left, right);
/// <summary>
- /// __m128d _mm_cmpnlt_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(5)
+ /// __m128d _mm_cmpnlt_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(5)
/// </summary>
public static Vector128<double> CompareNotLessThanScalar(Vector128<double> left, Vector128<double> right) => CompareNotLessThanScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpnle_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(6)
+ /// __m128d _mm_cmpnle_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(6)
/// </summary>
public static Vector128<double> CompareNotLessThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareNotLessThanOrEqual(left, right);
/// <summary>
- /// __m128d _mm_cmpnle_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(6)
+ /// __m128d _mm_cmpnle_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(6)
/// </summary>
public static Vector128<double> CompareNotLessThanOrEqualScalar(Vector128<double> left, Vector128<double> right) => CompareNotLessThanOrEqualScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpord_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(7)
+ /// __m128d _mm_cmpord_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(7)
/// </summary>
public static Vector128<double> CompareOrdered(Vector128<double> left, Vector128<double> right) => CompareOrdered(left, right);
/// <summary>
- /// __m128d _mm_cmpord_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(7)
+ /// __m128d _mm_cmpord_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(7)
/// </summary>
public static Vector128<double> CompareOrderedScalar(Vector128<double> left, Vector128<double> right) => CompareOrderedScalar(left, right);
/// <summary>
- /// __m128d _mm_cmpunord_pd (__m128d a, __m128d b); CMPPD xmm, xmm/m128, imm8(3)
+ /// __m128d _mm_cmpunord_pd (__m128d a, __m128d b);
+ /// CMPPD xmm, xmm/m128, imm8(3)
/// </summary>
public static Vector128<double> CompareUnordered(Vector128<double> left, Vector128<double> right) => CompareUnordered(left, right);
/// <summary>
- /// __m128d _mm_cmpunord_sd (__m128d a, __m128d b); CMPSD xmm, xmm/m64, imm8(3)
+ /// __m128d _mm_cmpunord_sd (__m128d a, __m128d b);
+ /// CMPSD xmm, xmm/m64, imm8(3)
/// </summary>
public static Vector128<double> CompareUnorderedScalar(Vector128<double> left, Vector128<double> right) => CompareUnorderedScalar(left, right);
/// <summary>
- /// __m128i _mm_cvtps_epi32 (__m128 a); CVTPS2DQ xmm, xmm/m128
+ /// __m128i _mm_cvtps_epi32 (__m128 a);
+ /// CVTPS2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<float> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128i _mm_cvtpd_epi32 (__m128d a); CVTPD2DQ xmm, xmm/m128
+ /// __m128i _mm_cvtpd_epi32 (__m128d a);
+ /// CVTPD2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<double> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128 _mm_cvtepi32_ps (__m128i a); CVTDQ2PS xmm, xmm/m128
+ /// __m128 _mm_cvtepi32_ps (__m128i a);
+ /// CVTDQ2PS xmm, xmm/m128
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector128<int> value) => ConvertToVector128Single(value);
/// <summary>
- /// __m128 _mm_cvtpd_ps (__m128d a); CVTPD2PS xmm, xmm/m128
+ /// __m128 _mm_cvtpd_ps (__m128d a);
+ /// CVTPD2PS xmm, xmm/m128
/// </summary>
public static Vector128<float> ConvertToVector128Single(Vector128<double> value) => ConvertToVector128Single(value);
/// <summary>
- /// __m128d _mm_cvtepi32_pd (__m128i a); CVTDQ2PD xmm, xmm/m128
+ /// __m128d _mm_cvtepi32_pd (__m128i a);
+ /// CVTDQ2PD xmm, xmm/m128
/// </summary>
public static Vector128<double> ConvertToVector128Double(Vector128<int> value) => ConvertToVector128Double(value);
/// <summary>
- /// __m128d _mm_cvtps_pd (__m128 a); CVTPS2PD xmm, xmm/m128
+ /// __m128d _mm_cvtps_pd (__m128 a);
+ /// CVTPS2PD xmm, xmm/m128
/// </summary>
public static Vector128<double> ConvertToVector128Double(Vector128<float> value) => ConvertToVector128Double(value);
/// <summary>
- /// double _mm_cvtsd_f64(__m128d a); HELPER: MOVSD
+ /// double _mm_cvtsd_f64(__m128d a);
+ /// HELPER: MOVSD
/// </summary>
public static double ConvertToDouble(Vector128<double> value) => ConvertToDouble(value);
/// <summary>
- /// int _mm_cvtsd_si32 (__m128d a); CVTSD2SI r32, xmm/m64
+ /// int _mm_cvtsd_si32 (__m128d a);
+ /// CVTSD2SI r32, xmm/m64
/// </summary>
public static int ConvertToInt32(Vector128<double> value) => ConvertToInt32(value);
/// <summary>
- /// int _mm_cvtsi128_si32 (__m128i a); MOVD reg/m32, xmm
+ /// int _mm_cvtsi128_si32 (__m128i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static int ConvertToInt32(Vector128<int> value) => ConvertToInt32(value);
/// <summary>
- /// __int64 _mm_cvtsd_si64 (__m128d a); CVTSD2SI r64, xmm/m64
+ /// __int64 _mm_cvtsd_si64 (__m128d a);
+ /// CVTSD2SI r64, xmm/m64
/// </summary>
public static long ConvertToInt64(Vector128<double> value) => ConvertToInt64(value);
/// <summary>
- /// __int64 _mm_cvtsi128_si64 (__m128i a); MOVQ reg/m64, xmm
+ /// __int64 _mm_cvtsi128_si64 (__m128i a);
+ /// MOVQ reg/m64, xmm
/// </summary>
public static long ConvertToInt64(Vector128<long> value) => ConvertToInt64(value);
/// <summary>
- /// int _mm_cvtsi128_si32 (__m128i a); MOVD reg/m32, xmm
+ /// int _mm_cvtsi128_si32 (__m128i a);
+ /// MOVD reg/m32, xmm
/// </summary>
public static uint ConvertToUInt32(Vector128<uint> value) => ConvertToUInt32(value);
/// <summary>
- /// __int64 _mm_cvtsi128_si64 (__m128i a); MOVQ reg/m64, xmm
+ /// __int64 _mm_cvtsi128_si64 (__m128i a);
+ /// MOVQ reg/m64, xmm
/// </summary>
public static ulong ConvertToUInt64(Vector128<ulong> value) => ConvertToUInt64(value);
/// <summary>
- /// __m128d _mm_cvtsi32_sd (__m128d a, int b); CVTSI2SD xmm, reg/m64
+ /// __m128d _mm_cvtsi32_sd (__m128d a, int b);
+ /// CVTSI2SD xmm, reg/m32
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, int value) => ConvertToVector128DoubleScalar(upper, value);
/// <summary>
- /// __m128d _mm_cvtsi64_sd (__m128d a, int b); CVTSI2SD xmm, reg/m64
+ /// __m128d _mm_cvtsi64_sd (__m128d a, __int64 b);
+ /// CVTSI2SD xmm, reg/m64
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, long value) => ConvertToVector128DoubleScalar(upper, value);
/// <summary>
- /// __m128d _mm_cvtss_sd (__m128d a, __m128 b); CVTSS2SD xmm, xmm/m32
+ /// __m128d _mm_cvtss_sd (__m128d a, __m128 b);
+ /// CVTSS2SD xmm, xmm/m32
/// </summary>
public static Vector128<double> ConvertToVector128DoubleScalar(Vector128<double> upper, Vector128<float> value) => ConvertToVector128DoubleScalar(upper, value);
/// <summary>
- /// __m128i _mm_cvtsi32_si128 (int a); MOVD xmm, reg/m32
+ /// __m128i _mm_cvtsi32_si128 (int a);
+ /// MOVD xmm, reg/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32Scalar(int value) => ConvertToVector128Int32Scalar(value);
/// <summary>
- /// __m128i _mm_cvtsi64_si128 (__int64 a); MOVQ xmm, reg/m64
+ /// __m128i _mm_cvtsi64_si128 (__int64 a);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64Scalar(long value) => ConvertToVector128Int64Scalar(value);
/// <summary>
- /// __m128 _mm_cvtsd_ss (__m128 a, __m128d b); CVTSD2SS xmm, xmm/m64
+ /// __m128 _mm_cvtsd_ss (__m128 a, __m128d b);
+ /// CVTSD2SS xmm, xmm/m64
/// </summary>
public static Vector128<float> ConvertToVector128SingleScalar(Vector128<float> upper, Vector128<double> value) => ConvertToVector128SingleScalar(upper, value);
/// <summary>
- /// __m128i _mm_cvtsi32_si128 (int a); MOVD xmm, reg/m32
+ /// __m128i _mm_cvtsi32_si128 (int a);
+ /// MOVD xmm, reg/m32
/// </summary>
public static Vector128<uint> ConvertToVector128UInt32Scalar(uint value) => ConvertToVector128UInt32Scalar(value);
/// <summary>
- /// __m128i _mm_cvtsi64_si128 (__int64 a); MOVQ xmm, reg/m64
+ /// __m128i _mm_cvtsi64_si128 (__int64 a);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static Vector128<ulong> ConvertToVector128UInt64Scalar(ulong value) => ConvertToVector128UInt64Scalar(value);
/// <summary>
- /// __m128i _mm_cvttps_epi32 (__m128 a); CVTTPS2DQ xmm, xmm/m128
+ /// __m128i _mm_cvttps_epi32 (__m128 a);
+ /// CVTTPS2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector128<float> value) => ConvertToVector128Int32WithTruncation(value);
/// <summary>
- /// __m128i _mm_cvttpd_epi32 (__m128d a); CVTTPD2DQ xmm, xmm/m128
+ /// __m128i _mm_cvttpd_epi32 (__m128d a);
+ /// CVTTPD2DQ xmm, xmm/m128
/// </summary>
public static Vector128<int> ConvertToVector128Int32WithTruncation(Vector128<double> value) => ConvertToVector128Int32WithTruncation(value);
/// <summary>
- /// int _mm_cvttsd_si32 (__m128d a); CVTTSD2SI reg, xmm/m64
+ /// int _mm_cvttsd_si32 (__m128d a);
+ /// CVTTSD2SI reg, xmm/m64
/// </summary>
public static int ConvertToInt32WithTruncation(Vector128<double> value) => ConvertToInt32WithTruncation(value);
/// <summary>
- /// __int64 _mm_cvttsd_si64 (__m128d a); CVTTSD2SI reg, xmm/m64
+ /// __int64 _mm_cvttsd_si64 (__m128d a);
+ /// CVTTSD2SI reg, xmm/m64
/// </summary>
public static long ConvertToInt64WithTruncation(Vector128<double> value) => ConvertToInt64WithTruncation(value);
/// <summary>
- /// __m128d _mm_div_pd (__m128d a, __m128d b); DIVPD xmm, xmm/m128
+ /// __m128d _mm_div_pd (__m128d a, __m128d b);
+ /// DIVPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Divide(Vector128<double> left, Vector128<double> right) => Divide(left, right);
/// <summary>
- /// __m128d _mm_div_sd (__m128d a, __m128d b); DIVSD xmm, xmm/m64
+ /// __m128d _mm_div_sd (__m128d a, __m128d b);
+ /// DIVSD xmm, xmm/m64
/// </summary>
public static Vector128<double> DivideScalar(Vector128<double> left, Vector128<double> right) => DivideScalar(left, right);
/// <summary>
- /// int _mm_extract_epi16 (__m128i a, int immediate); PEXTRW reg, xmm, imm8
+ /// int _mm_extract_epi16 (__m128i a, int immediate);
+ /// PEXTRW reg, xmm, imm8
/// </summary>
public static short Extract(Vector128<short> value, byte index) => Extract(value, index);
/// <summary>
- /// int _mm_extract_epi16 (__m128i a, int immediate); PEXTRW reg, xmm, imm8
+ /// int _mm_extract_epi16 (__m128i a, int immediate);
+ /// PEXTRW reg, xmm, imm8
/// </summary>
public static ushort Extract(Vector128<ushort> value, byte index) => Extract(value, index);
/// <summary>
- /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate); PINSRW xmm, reg/m16, imm8
+ /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate);
+ /// PINSRW xmm, reg/m16, imm8
/// </summary>
public static Vector128<short> Insert(Vector128<short> value, short data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate); PINSRW xmm, reg/m16, imm8
+ /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate);
+ /// PINSRW xmm, reg/m16, imm8
/// </summary>
public static Vector128<ushort> Insert(Vector128<ushort> value, ushort data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadVector128(sbyte* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadVector128(byte* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadVector128(short* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadVector128(ushort* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadVector128(int* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadVector128(uint* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadVector128(long* address) => LoadVector128(address);
/// <summary>
- /// __m128i _mm_loadu_si128 (__m128i const* mem_address); MOVDQU xmm, m128
+ /// __m128i _mm_loadu_si128 (__m128i const* mem_address);
+ /// MOVDQU xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadVector128(ulong* address) => LoadVector128(address);
/// <summary>
- /// __m128d _mm_loadu_pd (double const* mem_address); MOVUPD xmm, m128
+ /// __m128d _mm_loadu_pd (double const* mem_address);
+ /// MOVUPD xmm, m128
/// </summary>
public static unsafe Vector128<double> LoadVector128(double* address) => LoadVector128(address);
/// <summary>
- /// __m128d _mm_load_sd (double const* mem_address); MOVSD xmm, m64
+ /// __m128d _mm_load_sd (double const* mem_address);
+ /// MOVSD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadScalar(double* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadAlignedVector128(sbyte* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadAlignedVector128(byte* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadAlignedVector128(short* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadAlignedVector128(ushort* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadAlignedVector128(int* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadAlignedVector128(uint* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadAlignedVector128(long* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128i _mm_load_si128 (__m128i const* mem_address); MOVDQA xmm, m128
+ /// __m128i _mm_load_si128 (__m128i const* mem_address);
+ /// MOVDQA xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadAlignedVector128(ulong* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128d _mm_load_pd (double const* mem_address); MOVAPD xmm, m128
+ /// __m128d _mm_load_pd (double const* mem_address);
+ /// MOVAPD xmm, m128
/// </summary>
public static unsafe Vector128<double> LoadAlignedVector128(double* address) => LoadAlignedVector128(address);
/// <summary>
- /// __m128d _mm_loadh_pd (__m128d a, double const* mem_addr); MOVHPD xmm, m64
+ /// __m128d _mm_loadh_pd (__m128d a, double const* mem_addr);
+ /// MOVHPD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadHigh(Vector128<double> lower, double* address) => LoadHigh(lower, address);
/// <summary>
- /// __m128d _mm_loadl_pd (__m128d a, double const* mem_addr); MOVLPD xmm, m64
+ /// __m128d _mm_loadl_pd (__m128d a, double const* mem_addr);
+ /// MOVLPD xmm, m64
/// </summary>
public static unsafe Vector128<double> LoadLow(Vector128<double> upper, double* address) => LoadLow(upper, address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<sbyte> LoadScalar(sbyte* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<byte> LoadScalar(byte* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<short> LoadScalar(short* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<ushort> LoadScalar(ushort* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<int> LoadScalar(int* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<uint> LoadScalar(uint* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<long> LoadScalar(long* address) => LoadScalar(address);
/// <summary>
- /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr); MOVQ xmm, reg/m64
+ /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr);
+ /// MOVQ xmm, reg/m64
/// </summary>
public static unsafe Vector128<ulong> LoadScalar(ulong* address) => LoadScalar(address);
/// <summary>
- /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address); MASKMOVDQU xmm, xmm
+ /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address);
+ /// MASKMOVDQU xmm, xmm
/// </summary>
public static unsafe void MaskMove(Vector128<sbyte> source, Vector128<sbyte> mask, sbyte* address) => MaskMove(source, mask, address);
/// <summary>
- /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address); MASKMOVDQU xmm, xmm
+ /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address);
+ /// MASKMOVDQU xmm, xmm
/// </summary>
public static unsafe void MaskMove(Vector128<byte> source, Vector128<byte> mask, byte* address) => MaskMove(source, mask, address);
/// <summary>
- /// __m128i _mm_max_epu8 (__m128i a, __m128i b); PMAXUB xmm, xmm/m128
+ /// __m128i _mm_max_epu8 (__m128i a, __m128i b);
+ /// PMAXUB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Max(Vector128<byte> left, Vector128<byte> right) => Max(left, right);
/// <summary>
- /// __m128i _mm_max_epi16 (__m128i a, __m128i b); PMAXSW xmm, xmm/m128
+ /// __m128i _mm_max_epi16 (__m128i a, __m128i b);
+ /// PMAXSW xmm, xmm/m128
/// </summary>
public static Vector128<short> Max(Vector128<short> left, Vector128<short> right) => Max(left, right);
/// <summary>
- /// __m128d _mm_max_pd (__m128d a, __m128d b); MAXPD xmm, xmm/m128
+ /// __m128d _mm_max_pd (__m128d a, __m128d b);
+ /// MAXPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Max(Vector128<double> left, Vector128<double> right) => Max(left, right);
/// <summary>
- /// __m128d _mm_max_sd (__m128d a, __m128d b); MAXSD xmm, xmm/m64
+ /// __m128d _mm_max_sd (__m128d a, __m128d b);
+ /// MAXSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MaxScalar(Vector128<double> left, Vector128<double> right) => MaxScalar(left, right);
/// <summary>
- /// __m128i _mm_min_epu8 (__m128i a, __m128i b); PMINUB xmm, xmm/m128
+ /// __m128i _mm_min_epu8 (__m128i a, __m128i b);
+ /// PMINUB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Min(Vector128<byte> left, Vector128<byte> right) => Min(left, right);
/// <summary>
- /// __m128i _mm_min_epi16 (__m128i a, __m128i b); PMINSW xmm, xmm/m128
+ /// __m128i _mm_min_epi16 (__m128i a, __m128i b);
+ /// PMINSW xmm, xmm/m128
/// </summary>
public static Vector128<short> Min(Vector128<short> left, Vector128<short> right) => Min(left, right);
/// <summary>
- /// __m128d _mm_min_pd (__m128d a, __m128d b); MINPD xmm, xmm/m128
+ /// __m128d _mm_min_pd (__m128d a, __m128d b);
+ /// MINPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Min(Vector128<double> left, Vector128<double> right) => Min(left, right);
/// <summary>
- /// __m128d _mm_min_sd (__m128d a, __m128d b); MINSD xmm, xmm/m64
+ /// __m128d _mm_min_sd (__m128d a, __m128d b);
+ /// MINSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MinScalar(Vector128<double> left, Vector128<double> right) => MinScalar(left, right);
/// <summary>
- /// __m128d _mm_move_sd (__m128d a, __m128d b); MOVSD xmm, xmm
+ /// __m128d _mm_move_sd (__m128d a, __m128d b);
+ /// MOVSD xmm, xmm
/// </summary>
public static Vector128<double> MoveScalar(Vector128<double> upper, Vector128<double> value) => MoveScalar(upper, value);
/// <summary>
- /// int _mm_movemask_epi8 (__m128i a); PMOVMSKB reg, xmm
+ /// int _mm_movemask_epi8 (__m128i a);
+ /// PMOVMSKB reg, xmm
/// </summary>
public static int MoveMask(Vector128<sbyte> value) => MoveMask(value);
/// <summary>
- /// int _mm_movemask_epi8 (__m128i a); PMOVMSKB reg, xmm
+ /// int _mm_movemask_epi8 (__m128i a);
+ /// PMOVMSKB reg, xmm
/// </summary>
public static int MoveMask(Vector128<byte> value) => MoveMask(value);
/// <summary>
- /// int _mm_movemask_pd (__m128d a); MOVMSKPD reg, xmm
+ /// int _mm_movemask_pd (__m128d a);
+ /// MOVMSKPD reg, xmm
/// </summary>
public static int MoveMask(Vector128<double> value) => MoveMask(value);
/// <summary>
- /// __m128i _mm_move_epi64 (__m128i a); MOVQ xmm, xmm
+ /// __m128i _mm_move_epi64 (__m128i a);
+ /// MOVQ xmm, xmm
/// </summary>
public static Vector128<long> MoveScalar(Vector128<long> value) => MoveScalar(value);
/// <summary>
- /// __m128i _mm_move_epi64 (__m128i a); MOVQ xmm, xmm
+ /// __m128i _mm_move_epi64 (__m128i a);
+ /// MOVQ xmm, xmm
/// </summary>
public static Vector128<ulong> MoveScalar(Vector128<ulong> value) => MoveScalar(value);
/// <summary>
- /// __m128i _mm_mul_epu32 (__m128i a, __m128i b); PMULUDQ xmm, xmm/m128
+ /// __m128i _mm_mul_epu32 (__m128i a, __m128i b);
+ /// PMULUDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Multiply(Vector128<uint> left, Vector128<uint> right) => Multiply(left, right);
/// <summary>
- /// __m128d _mm_mul_pd (__m128d a, __m128d b); MULPD xmm, xmm/m128
+ /// __m128d _mm_mul_pd (__m128d a, __m128d b);
+ /// MULPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Multiply(Vector128<double> left, Vector128<double> right) => Multiply(left, right);
/// <summary>
- /// __m128d _mm_mul_sd (__m128d a, __m128d b); MULSD xmm, xmm/m64
+ /// __m128d _mm_mul_sd (__m128d a, __m128d b);
+ /// MULSD xmm, xmm/m64
/// </summary>
public static Vector128<double> MultiplyScalar(Vector128<double> left, Vector128<double> right) => MultiplyScalar(left, right);
/// <summary>
- /// __m128i _mm_mulhi_epi16 (__m128i a, __m128i b); PMULHW xmm, xmm/m128
+ /// __m128i _mm_mulhi_epi16 (__m128i a, __m128i b);
+ /// PMULHW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyHigh(Vector128<short> left, Vector128<short> right) => MultiplyHigh(left, right);
/// <summary>
- /// __m128i _mm_mulhi_epu16 (__m128i a, __m128i b); PMULHUW xmm, xmm/m128
+ /// __m128i _mm_mulhi_epu16 (__m128i a, __m128i b);
+ /// PMULHUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> MultiplyHigh(Vector128<ushort> left, Vector128<ushort> right) => MultiplyHigh(left, right);
/// <summary>
- /// __m128i _mm_madd_epi16 (__m128i a, __m128i b); PMADDWD xmm, xmm/m128
+ /// __m128i _mm_madd_epi16 (__m128i a, __m128i b);
+ /// PMADDWD xmm, xmm/m128
/// </summary>
public static Vector128<int> MultiplyHorizontalAdd(Vector128<short> left, Vector128<short> right) => MultiplyHorizontalAdd(left, right);
/// <summary>
- /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b); PMULLW xmm, xmm/m128
+ /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b);
+ /// PMULLW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyLow(Vector128<short> left, Vector128<short> right) => MultiplyLow(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<byte> Or(Vector128<byte> left, Vector128<byte> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Or(Vector128<sbyte> left, Vector128<sbyte> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<short> Or(Vector128<short> left, Vector128<short> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Or(Vector128<ushort> left, Vector128<ushort> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<int> Or(Vector128<int> left, Vector128<int> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<uint> Or(Vector128<uint> left, Vector128<uint> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<long> Or(Vector128<long> left, Vector128<long> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128
+ /// __m128i _mm_or_si128 (__m128i a, __m128i b);
+ /// POR xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Or(Vector128<ulong> left, Vector128<ulong> right) => Or(left, right);
/// <summary>
- /// __m128d _mm_or_pd (__m128d a, __m128d b); ORPD xmm, xmm/m128
+ /// __m128d _mm_or_pd (__m128d a, __m128d b);
+ /// ORPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Or(Vector128<double> left, Vector128<double> right) => Or(left, right);
/// <summary>
- /// __m128i _mm_packs_epi16 (__m128i a, __m128i b); PACKSSWB xmm, xmm/m128
+ /// __m128i _mm_packs_epi16 (__m128i a, __m128i b);
+ /// PACKSSWB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> PackSignedSaturate(Vector128<short> left, Vector128<short> right) => PackSignedSaturate(left, right);
/// <summary>
- /// __m128i _mm_packs_epi32 (__m128i a, __m128i b); PACKSSDW xmm, xmm/m128
+ /// __m128i _mm_packs_epi32 (__m128i a, __m128i b);
+ /// PACKSSDW xmm, xmm/m128
/// </summary>
public static Vector128<short> PackSignedSaturate(Vector128<int> left, Vector128<int> right) => PackSignedSaturate(left, right);
/// <summary>
- /// __m128i _mm_packus_epi16 (__m128i a, __m128i b); PACKUSWB xmm, xmm/m128
+ /// __m128i _mm_packus_epi16 (__m128i a, __m128i b);
+ /// PACKUSWB xmm, xmm/m128
/// </summary>
public static Vector128<byte> PackUnsignedSaturate(Vector128<short> left, Vector128<short> right) => PackUnsignedSaturate(left, right);
/// <summary>
- /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector128<sbyte> SetVector128(sbyte e15, sbyte e14, sbyte e13, sbyte e12, sbyte e11, sbyte e10, sbyte e9, sbyte e8, sbyte e7, sbyte e6, sbyte e5, sbyte e4, sbyte e3, sbyte e2, sbyte e1, sbyte e0) => SetVector128(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0); HELPER
+ /// ___m128i _mm_set_epi8 (char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0);
+ /// HELPER
/// </summary>
public static Vector128<byte> SetVector128(byte e15, byte e14, byte e13, byte e12, byte e11, byte e10, byte e9, byte e8, byte e7, byte e6, byte e5, byte e4, byte e3, byte e2, byte e1, byte e0) => SetVector128(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector128<short> SetVector128(short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0) => SetVector128(e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0); HELPER
+ /// __m128i _mm_set_epi16 (short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0);
+ /// HELPER
/// </summary>
public static Vector128<ushort> SetVector128(ushort e7, ushort e6, ushort e5, ushort e4, ushort e3, ushort e2, ushort e1, ushort e0) => SetVector128(e7, e6, e5, e4, e3, e2, e1, e0);
/// <summary>
- /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0); HELPER
+ /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector128<int> SetVector128(int e3, int e2, int e1, int e0) => SetVector128(e3, e2, e1, e0);
/// <summary>
- /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0); HELPER
+ /// __m128i _mm_set_epi32 (int e3, int e2, int e1, int e0);
+ /// HELPER
/// </summary>
public static Vector128<uint> SetVector128(uint e3, uint e2, uint e1, uint e0) => SetVector128(e3, e2, e1, e0);
/// <summary>
- /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0); HELPER
+ /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector128<long> SetVector128(long e1, long e0) => SetVector128(e1, e0);
/// <summary>
- /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0); HELPER
+ /// __m128i _mm_set_epi64x (__int64 e1, __int64 e0);
+ /// HELPER
/// </summary>
public static Vector128<ulong> SetVector128(ulong e1, ulong e0) => SetVector128(e1, e0);
/// <summary>
- /// __m128d _mm_set_pd (double e1, double e0); HELPER
+ /// __m128d _mm_set_pd (double e1, double e0);
+ /// HELPER
/// </summary>
public static Vector128<double> SetVector128(double e1, double e0) => SetVector128(e1, e0);
/// <summary>
- /// __m128d _mm_set_sd (double a); HELPER
+ /// __m128d _mm_set_sd (double a);
+ /// HELPER
/// </summary>
public static Vector128<double> SetScalarVector128(double value) => SetScalarVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi8 (char a); HELPER
+ /// __m128i _mm_set1_epi8 (char a);
+ /// HELPER
/// </summary>
public static Vector128<byte> SetAllVector128(byte value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi8 (char a); HELPER
+ /// __m128i _mm_set1_epi8 (char a);
+ /// HELPER
/// </summary>
public static Vector128<sbyte> SetAllVector128(sbyte value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi16 (short a); HELPER
+ /// __m128i _mm_set1_epi16 (short a);
+ /// HELPER
/// </summary>
public static Vector128<short> SetAllVector128(short value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi16 (short a); HELPER
+ /// __m128i _mm_set1_epi16 (short a);
+ /// HELPER
/// </summary>
public static Vector128<ushort> SetAllVector128(ushort value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi32 (int a); HELPER
+ /// __m128i _mm_set1_epi32 (int a);
+ /// HELPER
/// </summary>
public static Vector128<int> SetAllVector128(int value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi32 (int a); HELPER
+ /// __m128i _mm_set1_epi32 (int a);
+ /// HELPER
/// </summary>
public static Vector128<uint> SetAllVector128(uint value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi64x (long long a); HELPER
+ /// __m128i _mm_set1_epi64x (long long a);
+ /// HELPER
/// </summary>
public static Vector128<long> SetAllVector128(long value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_set1_epi64x (long long a); HELPER
+ /// __m128i _mm_set1_epi64x (long long a);
+ /// HELPER
/// </summary>
public static Vector128<ulong> SetAllVector128(ulong value) => SetAllVector128(value);
/// <summary>
- /// __m128d _mm_set1_pd (double a); HELPER
+ /// __m128d _mm_set1_pd (double a);
+ /// HELPER
/// </summary>
public static Vector128<double> SetAllVector128(double value) => SetAllVector128(value);
/// <summary>
- /// __m128i _mm_setzero_si128 (); HELPER: PXOR
- /// __m128d _mm_setzero_pd (void); HELPER: XORPD
+ /// __m128i _mm_setzero_si128 ();
+ /// HELPER: PXOR
+ /// __m128d _mm_setzero_pd (void);
+ /// HELPER: XORPD
/// </summary>
public static Vector128<T> SetZeroVector128<T>() where T : struct
{
@@ -886,542 +1084,670 @@ namespace System.Runtime.Intrinsics.X86
}
/// <summary>
- /// __m128i _mm_sad_epu8 (__m128i a, __m128i b); PSADBW xmm, xmm/m128
+ /// __m128i _mm_sad_epu8 (__m128i a, __m128i b);
+ /// PSADBW xmm, xmm/m128
/// </summary>
public static Vector128<long> SumAbsoluteDifferences(Vector128<byte> left, Vector128<byte> right) => SumAbsoluteDifferences(left, right);
/// <summary>
- /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8
+ /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate);
+ /// PSHUFD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<int> Shuffle(Vector128<int> value, byte control) => Shuffle(value, control);
/// <summary>
- /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8
+ /// __m128i _mm_shuffle_epi32 (__m128i a, int immediate);
+ /// PSHUFD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<uint> Shuffle(Vector128<uint> value, byte control) => Shuffle(value, control);
/// <summary>
- /// __m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate); SHUFPD xmm, xmm/m128, imm8
+ /// __m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate);
+ /// SHUFPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Shuffle(Vector128<double> left, Vector128<double> right, byte control) => Shuffle(left, right, control);
/// <summary>
- /// __m128i _mm_shufflehi_epi16 (__m128i a, int immediate); PSHUFHW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflehi_epi16 (__m128i a, int immediate);
+ /// PSHUFHW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<short> ShuffleHigh(Vector128<short> value, byte control) => ShuffleHigh(value, control);
/// <summary>
- /// __m128i _mm_shufflehi_epi16 (__m128i a, int control); PSHUFHW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflehi_epi16 (__m128i a, int control);
+ /// PSHUFHW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> ShuffleHigh(Vector128<ushort> value, byte control) => ShuffleHigh(value, control);
/// <summary>
- /// __m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflelo_epi16 (__m128i a, int control);
+ /// PSHUFLW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<short> ShuffleLow(Vector128<short> value, byte control) => ShuffleLow(value, control);
/// <summary>
- /// __m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8
+ /// __m128i _mm_shufflelo_epi16 (__m128i a, int control);
+ /// PSHUFLW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> ShuffleLow(Vector128<ushort> value, byte control) => ShuffleLow(value, control);
/// <summary>
- /// __m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128
+ /// __m128i _mm_sll_epi16 (__m128i a, __m128i count);
+ /// PSLLW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftLeftLogical(Vector128<short> value, Vector128<short> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128
+ /// __m128i _mm_sll_epi16 (__m128i a, __m128i count);
+ /// PSLLW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, Vector128<ushort> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128
+ /// __m128i _mm_sll_epi32 (__m128i a, __m128i count);
+ /// PSLLD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftLeftLogical(Vector128<int> value, Vector128<int> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128
+ /// __m128i _mm_sll_epi32 (__m128i a, __m128i count);
+ /// PSLLD xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, Vector128<uint> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128
+ /// __m128i _mm_sll_epi64 (__m128i a, __m128i count);
+ /// PSLLQ xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftLeftLogical(Vector128<long> value, Vector128<long> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128
+ /// __m128i _mm_sll_epi64 (__m128i a, __m128i count);
+ /// PSLLQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, Vector128<ulong> count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8
+ /// __m128i _mm_slli_epi16 (__m128i a, int immediate);
+ /// PSLLW xmm, imm8
/// </summary>
public static Vector128<short> ShiftLeftLogical(Vector128<short> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8
+ /// __m128i _mm_slli_epi16 (__m128i a, int immediate);
+ /// PSLLW xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8
+ /// __m128i _mm_slli_epi32 (__m128i a, int immediate);
+ /// PSLLD xmm, imm8
/// </summary>
public static Vector128<int> ShiftLeftLogical(Vector128<int> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8
+ /// __m128i _mm_slli_epi32 (__m128i a, int immediate);
+ /// PSLLD xmm, imm8
/// </summary>
public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8
+ /// __m128i _mm_slli_epi64 (__m128i a, int immediate);
+ /// PSLLQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftLeftLogical(Vector128<long> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8
+ /// __m128i _mm_slli_epi64 (__m128i a, int immediate);
+ /// PSLLQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, byte count) => ShiftLeftLogical(value, count);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<sbyte> ShiftLeftLogical128BitLane(Vector128<sbyte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<byte> ShiftLeftLogical128BitLane(Vector128<byte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<short> ShiftLeftLogical128BitLane(Vector128<short> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftLeftLogical128BitLane(Vector128<ushort> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<int> ShiftLeftLogical128BitLane(Vector128<int> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<uint> ShiftLeftLogical128BitLane(Vector128<uint> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftLeftLogical128BitLane(Vector128<long> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8
+ /// __m128i _mm_bslli_si128 (__m128i a, int imm8);
+ /// PSLLDQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftLeftLogical128BitLane(Vector128<ulong> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_sra_epi16 (__m128i a, __m128i count); PSRAW xmm, xmm/m128
+ /// __m128i _mm_sra_epi16 (__m128i a, __m128i count);
+ /// PSRAW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, Vector128<short> count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m128i _mm_sra_epi32 (__m128i a, __m128i count); PSRAD xmm, xmm/m128
+ /// __m128i _mm_sra_epi32 (__m128i a, __m128i count);
+ /// PSRAD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, Vector128<int> count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m128i _mm_srai_epi16 (__m128i a, int immediate); PSRAW xmm, imm8
+ /// __m128i _mm_srai_epi16 (__m128i a, int immediate);
+ /// PSRAW xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, byte count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m128i _mm_srai_epi32 (__m128i a, int immediate); PSRAD xmm, imm8
+ /// __m128i _mm_srai_epi32 (__m128i a, int immediate);
+ /// PSRAD xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, byte count) => ShiftRightArithmetic(value, count);
/// <summary>
- /// __m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128
+ /// __m128i _mm_srl_epi16 (__m128i a, __m128i count);
+ /// PSRLW xmm, xmm/m128
/// </summary>
public static Vector128<short> ShiftRightLogical(Vector128<short> value, Vector128<short> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128
+ /// __m128i _mm_srl_epi16 (__m128i a, __m128i count);
+ /// PSRLW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, Vector128<ushort> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128
+ /// __m128i _mm_srl_epi32 (__m128i a, __m128i count);
+ /// PSRLD xmm, xmm/m128
/// </summary>
public static Vector128<int> ShiftRightLogical(Vector128<int> value, Vector128<int> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128
+ /// __m128i _mm_srl_epi32 (__m128i a, __m128i count);
+ /// PSRLD xmm, xmm/m128
/// </summary>
public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, Vector128<uint> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128
+ /// __m128i _mm_srl_epi64 (__m128i a, __m128i count);
+ /// PSRLQ xmm, xmm/m128
/// </summary>
public static Vector128<long> ShiftRightLogical(Vector128<long> value, Vector128<long> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128
+ /// __m128i _mm_srl_epi64 (__m128i a, __m128i count);
+ /// PSRLQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, Vector128<ulong> count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8
+ /// __m128i _mm_srli_epi16 (__m128i a, int immediate);
+ /// PSRLW xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightLogical(Vector128<short> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8
+ /// __m128i _mm_srli_epi16 (__m128i a, int immediate);
+ /// PSRLW xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8
+ /// __m128i _mm_srli_epi32 (__m128i a, int immediate);
+ /// PSRLD xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightLogical(Vector128<int> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8
+ /// __m128i _mm_srli_epi32 (__m128i a, int immediate);
+ /// PSRLD xmm, imm8
/// </summary>
public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8
+ /// __m128i _mm_srli_epi64 (__m128i a, int immediate);
+ /// PSRLQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftRightLogical(Vector128<long> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8
+ /// __m128i _mm_srli_epi64 (__m128i a, int immediate);
+ /// PSRLQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, byte count) => ShiftRightLogical(value, count);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<sbyte> ShiftRightLogical128BitLane(Vector128<sbyte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<byte> ShiftRightLogical128BitLane(Vector128<byte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<short> ShiftRightLogical128BitLane(Vector128<short> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<ushort> ShiftRightLogical128BitLane(Vector128<ushort> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<int> ShiftRightLogical128BitLane(Vector128<int> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<uint> ShiftRightLogical128BitLane(Vector128<uint> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<long> ShiftRightLogical128BitLane(Vector128<long> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8
+ /// __m128i _mm_bsrli_si128 (__m128i a, int imm8);
+ /// PSRLDQ xmm, imm8
/// </summary>
public static Vector128<ulong> ShiftRightLogical128BitLane(Vector128<ulong> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
/// <summary>
- /// __m128d _mm_sqrt_pd (__m128d a); SQRTPD xmm, xmm/m128
+ /// __m128d _mm_sqrt_pd (__m128d a);
+ /// SQRTPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Sqrt(Vector128<double> value) => Sqrt(value);
/// <summary>
- /// __m128d _mm_sqrt_sd (__m128d a); SQRTSD xmm, xmm/64
+ /// __m128d _mm_sqrt_sd (__m128d a);
+ /// SQRTSD xmm, xmm/64
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> SqrtScalar(Vector128<double> value) => SqrtScalar(value);
/// <summary>
- /// __m128d _mm_sqrt_sd (__m128d a, __m128d b); SQRTSD xmm, xmm/64
+ /// __m128d _mm_sqrt_sd (__m128d a, __m128d b);
+ /// SQRTSD xmm, xmm/64
/// </summary>
public static Vector128<double> SqrtScalar(Vector128<double> upper, Vector128<double> value) => SqrtScalar(upper, value);
/// <summary>
- /// void _mm_store_sd (double* mem_addr, __m128d a); MOVSD m64, xmm
+ /// void _mm_store_sd (double* mem_addr, __m128d a);
+ /// MOVSD m64, xmm
/// </summary>
public static unsafe void StoreScalar(double* address, Vector128<double> source) => StoreScalar(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(sbyte* address, Vector128<sbyte> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(byte* address, Vector128<byte> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(short* address, Vector128<short> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(ushort* address, Vector128<ushort> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(int* address, Vector128<int> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(uint* address, Vector128<uint> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(long* address, Vector128<long> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm
+ /// void _mm_store_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQA m128, xmm
/// </summary>
public static unsafe void StoreAligned(ulong* address, Vector128<ulong> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_store_pd (double* mem_addr, __m128d a); MOVAPD m128, xmm
+ /// void _mm_store_pd (double* mem_addr, __m128d a);
+ /// MOVAPD m128, xmm
/// </summary>
public static unsafe void StoreAligned(double* address, Vector128<double> source) => StoreAligned(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(sbyte* address, Vector128<sbyte> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(byte* address, Vector128<byte> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(short* address, Vector128<short> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ushort* address, Vector128<ushort> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(int* address, Vector128<int> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(uint* address, Vector128<uint> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(long* address, Vector128<long> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm
+ /// void _mm_stream_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVNTDQ m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(ulong* address, Vector128<ulong> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_stream_pd (double* mem_addr, __m128d a); MOVNTPD m128, xmm
+ /// void _mm_stream_pd (double* mem_addr, __m128d a);
+ /// MOVNTPD m128, xmm
/// </summary>
public static unsafe void StoreAlignedNonTemporal(double* address, Vector128<double> source) => StoreAlignedNonTemporal(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(sbyte* address, Vector128<sbyte> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(byte* address, Vector128<byte> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(short* address, Vector128<short> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(ushort* address, Vector128<ushort> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(int* address, Vector128<int> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(uint* address, Vector128<uint> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(long* address, Vector128<long> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm
+ /// void _mm_storeu_si128 (__m128i* mem_addr, __m128i a);
+ /// MOVDQU m128, xmm
/// </summary>
public static unsafe void Store(ulong* address, Vector128<ulong> source) => Store(address, source);
/// <summary>
- /// void _mm_storeu_pd (double* mem_addr, __m128d a); MOVUPD m128, xmm
+ /// void _mm_storeu_pd (double* mem_addr, __m128d a);
+ /// MOVUPD m128, xmm
/// </summary>
public static unsafe void Store(double* address, Vector128<double> source) => Store(address, source);
/// <summary>
- /// void _mm_storeh_pd (double* mem_addr, __m128d a); MOVHPD m64, xmm
+ /// void _mm_storeh_pd (double* mem_addr, __m128d a);
+ /// MOVHPD m64, xmm
/// </summary>
public static unsafe void StoreHigh(double* address, Vector128<double> source) => StoreHigh(address, source);
/// <summary>
- /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm
+ /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a);
+ /// MOVQ m64, xmm
/// </summary>
public static unsafe void StoreLow(long* address, Vector128<long> source) => StoreLow(address, source);
/// <summary>
- /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm
+ /// void _mm_storel_epi64 (__m128i* mem_addr, __m128i a);
+ /// MOVQ m64, xmm
/// </summary>
public static unsafe void StoreLow(ulong* address, Vector128<ulong> source) => StoreLow(address, source);
/// <summary>
- /// void _mm_storel_pd (double* mem_addr, __m128d a); MOVLPD m64, xmm
+ /// void _mm_storel_pd (double* mem_addr, __m128d a);
+ /// MOVLPD m64, xmm
/// </summary>
public static unsafe void StoreLow(double* address, Vector128<double> source) => StoreLow(address, source);
/// <summary>
- /// __m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128
+ /// __m128i _mm_sub_epi8 (__m128i a, __m128i b);
+ /// PSUBB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Subtract(Vector128<byte> left, Vector128<byte> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128
+ /// __m128i _mm_sub_epi8 (__m128i a, __m128i b);
+ /// PSUBB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Subtract(Vector128<sbyte> left, Vector128<sbyte> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128
+ /// __m128i _mm_sub_epi16 (__m128i a, __m128i b);
+ /// PSUBW xmm, xmm/m128
/// </summary>
public static Vector128<short> Subtract(Vector128<short> left, Vector128<short> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128
+ /// __m128i _mm_sub_epi16 (__m128i a, __m128i b);
+ /// PSUBW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Subtract(Vector128<ushort> left, Vector128<ushort> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128
+ /// __m128i _mm_sub_epi32 (__m128i a, __m128i b);
+ /// PSUBD xmm, xmm/m128
/// </summary>
public static Vector128<int> Subtract(Vector128<int> left, Vector128<int> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128
+ /// __m128i _mm_sub_epi32 (__m128i a, __m128i b);
+ /// PSUBD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Subtract(Vector128<uint> left, Vector128<uint> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128
+ /// __m128i _mm_sub_epi64 (__m128i a, __m128i b);
+ /// PSUBQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Subtract(Vector128<long> left, Vector128<long> right) => Subtract(left, right);
/// <summary>
- /// __m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128
+ /// __m128i _mm_sub_epi64 (__m128i a, __m128i b);
+ /// PSUBQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Subtract(Vector128<ulong> left, Vector128<ulong> right) => Subtract(left, right);
/// <summary>
- /// __m128d _mm_sub_pd (__m128d a, __m128d b); SUBPD xmm, xmm/m128
+ /// __m128d _mm_sub_pd (__m128d a, __m128d b);
+ /// SUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Subtract(Vector128<double> left, Vector128<double> right) => Subtract(left, right);
/// <summary>
- /// __m128d _mm_sub_sd (__m128d a, __m128d b); SUBSD xmm, xmm/m64
+ /// __m128d _mm_sub_sd (__m128d a, __m128d b);
+ /// SUBSD xmm, xmm/m64
/// </summary>
public static Vector128<double> SubtractScalar(Vector128<double> left, Vector128<double> right) => SubtractScalar(left, right);
/// <summary>
- /// __m128i _mm_subs_epi8 (__m128i a, __m128i b); PSUBSB xmm, xmm/m128
+ /// __m128i _mm_subs_epi8 (__m128i a, __m128i b);
+ /// PSUBSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> SubtractSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m128i _mm_subs_epi16 (__m128i a, __m128i b); PSUBSW xmm, xmm/m128
+ /// __m128i _mm_subs_epi16 (__m128i a, __m128i b);
+ /// PSUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> SubtractSaturate(Vector128<short> left, Vector128<short> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m128i _mm_subs_epu8 (__m128i a, __m128i b); PSUBUSB xmm, xmm/m128
+ /// __m128i _mm_subs_epu8 (__m128i a, __m128i b);
+ /// PSUBUSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> SubtractSaturate(Vector128<byte> left, Vector128<byte> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m128i _mm_subs_epu16 (__m128i a, __m128i b); PSUBUSW xmm, xmm/m128
+ /// __m128i _mm_subs_epu16 (__m128i a, __m128i b);
+ /// PSUBUSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> SubtractSaturate(Vector128<ushort> left, Vector128<ushort> right) => SubtractSaturate(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b);
+ /// PUNPCKHBW xmm, xmm/m128
/// </summary>
public static Vector128<byte> UnpackHigh(Vector128<byte> left, Vector128<byte> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi8 (__m128i a, __m128i b);
+ /// PUNPCKHBW xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> UnpackHigh(Vector128<sbyte> left, Vector128<sbyte> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b);
+ /// PUNPCKHWD xmm, xmm/m128
/// </summary>
public static Vector128<short> UnpackHigh(Vector128<short> left, Vector128<short> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi16 (__m128i a, __m128i b);
+ /// PUNPCKHWD xmm, xmm/m128
    /// </summary>
public static Vector128<ushort> UnpackHigh(Vector128<ushort> left, Vector128<ushort> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b);
+ /// PUNPCKHDQ xmm, xmm/m128
/// </summary>
public static Vector128<int> UnpackHigh(Vector128<int> left, Vector128<int> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi32 (__m128i a, __m128i b);
+ /// PUNPCKHDQ xmm, xmm/m128
/// </summary>
public static Vector128<uint> UnpackHigh(Vector128<uint> left, Vector128<uint> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b);
+ /// PUNPCKHQDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> UnpackHigh(Vector128<long> left, Vector128<long> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128
+ /// __m128i _mm_unpackhi_epi64 (__m128i a, __m128i b);
+ /// PUNPCKHQDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> UnpackHigh(Vector128<ulong> left, Vector128<ulong> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128d _mm_unpackhi_pd (__m128d a, __m128d b); UNPCKHPD xmm, xmm/m128
+ /// __m128d _mm_unpackhi_pd (__m128d a, __m128d b);
+ /// UNPCKHPD xmm, xmm/m128
/// </summary>
public static Vector128<double> UnpackHigh(Vector128<double> left, Vector128<double> right) => UnpackHigh(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b);
+ /// PUNPCKLBW xmm, xmm/m128
/// </summary>
public static Vector128<byte> UnpackLow(Vector128<byte> left, Vector128<byte> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi8 (__m128i a, __m128i b);
+ /// PUNPCKLBW xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> UnpackLow(Vector128<sbyte> left, Vector128<sbyte> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b);
+ /// PUNPCKLWD xmm, xmm/m128
/// </summary>
public static Vector128<short> UnpackLow(Vector128<short> left, Vector128<short> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi16 (__m128i a, __m128i b);
+ /// PUNPCKLWD xmm, xmm/m128
/// </summary>
public static Vector128<ushort> UnpackLow(Vector128<ushort> left, Vector128<ushort> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b);
+ /// PUNPCKLDQ xmm, xmm/m128
/// </summary>
public static Vector128<int> UnpackLow(Vector128<int> left, Vector128<int> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi32 (__m128i a, __m128i b);
+ /// PUNPCKLDQ xmm, xmm/m128
/// </summary>
public static Vector128<uint> UnpackLow(Vector128<uint> left, Vector128<uint> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b);
+ /// PUNPCKLQDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> UnpackLow(Vector128<long> left, Vector128<long> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128
+ /// __m128i _mm_unpacklo_epi64 (__m128i a, __m128i b);
+ /// PUNPCKLQDQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> UnpackLow(Vector128<ulong> left, Vector128<ulong> right) => UnpackLow(left, right);
/// <summary>
- /// __m128d _mm_unpacklo_pd (__m128d a, __m128d b); UNPCKLPD xmm, xmm/m128
+ /// __m128d _mm_unpacklo_pd (__m128d a, __m128d b);
+ /// UNPCKLPD xmm, xmm/m128
/// </summary>
public static Vector128<double> UnpackLow(Vector128<double> left, Vector128<double> right) => UnpackLow(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<byte> Xor(Vector128<byte> left, Vector128<byte> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Xor(Vector128<sbyte> left, Vector128<sbyte> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<short> Xor(Vector128<short> left, Vector128<short> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Xor(Vector128<ushort> left, Vector128<ushort> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<int> Xor(Vector128<int> left, Vector128<int> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<uint> Xor(Vector128<uint> left, Vector128<uint> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<long> Xor(Vector128<long> left, Vector128<long> right) => Xor(left, right);
/// <summary>
- /// __m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128
+ /// __m128i _mm_xor_si128 (__m128i a, __m128i b);
+ /// PXOR xmm, xmm/m128
/// </summary>
public static Vector128<ulong> Xor(Vector128<ulong> left, Vector128<ulong> right) => Xor(left, right);
/// <summary>
- /// __m128d _mm_xor_pd (__m128d a, __m128d b); XORPD xmm, xmm/m128
+ /// __m128d _mm_xor_pd (__m128d a, __m128d b);
+ /// XORPD xmm, xmm/m128
/// </summary>
public static Vector128<double> Xor(Vector128<double> left, Vector128<double> right) => Xor(left, right);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.PlatformNotSupported.cs
index f61273bda8..9f9662a399 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.PlatformNotSupported.cs
@@ -16,29 +16,35 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128 _mm_addsub_ps (__m128 a, __m128 b); ADDSUBPS xmm, xmm/m128
+ /// __m128 _mm_addsub_ps (__m128 a, __m128 b);
+ /// ADDSUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> AddSubtract(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_addsub_pd (__m128d a, __m128d b); ADDSUBPD xmm, xmm/m128
+ /// __m128d _mm_addsub_pd (__m128d a, __m128d b);
+ /// ADDSUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> AddSubtract(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_hadd_ps (__m128 a, __m128 b); HADDPS xmm, xmm/m128
+ /// __m128 _mm_hadd_ps (__m128 a, __m128 b);
+ /// HADDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> HorizontalAdd(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_hadd_pd (__m128d a, __m128d b); HADDPD xmm, xmm/m128
+ /// __m128d _mm_hadd_pd (__m128d a, __m128d b);
+ /// HADDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> HorizontalAdd(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_hsub_ps (__m128 a, __m128 b); HSUBPS xmm, xmm/m128
+ /// __m128 _mm_hsub_ps (__m128 a, __m128 b);
+ /// HSUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> HorizontalSubtract(Vector128<float> left, Vector128<float> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_hsub_pd (__m128d a, __m128d b); HSUBPD xmm, xmm/m128
+ /// __m128d _mm_hsub_pd (__m128d a, __m128d b);
+ /// HSUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> HorizontalSubtract(Vector128<double> left, Vector128<double> right) { throw new PlatformNotSupportedException(); }
@@ -48,7 +54,8 @@ namespace System.Runtime.Intrinsics.X86
public static unsafe Vector128<double> LoadAndDuplicateToVector128(double* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_lddqu_si128 (__m128i const* mem_addr); LDDQU xmm, m128
+ /// __m128i _mm_lddqu_si128 (__m128i const* mem_addr);
+ /// LDDQU xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadDquVector128(sbyte* address) { throw new PlatformNotSupportedException(); }
public static unsafe Vector128<byte> LoadDquVector128(byte* address) { throw new PlatformNotSupportedException(); }
@@ -60,17 +67,20 @@ namespace System.Runtime.Intrinsics.X86
public static unsafe Vector128<ulong> LoadDquVector128(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_movedup_pd (__m128d a); MOVDDUP xmm, xmm/m64
+ /// __m128d _mm_movedup_pd (__m128d a);
+ /// MOVDDUP xmm, xmm/m64
/// </summary>
public static Vector128<double> MoveAndDuplicate(Vector128<double> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_movehdup_ps (__m128 a); MOVSHDUP xmm, xmm/m128
+ /// __m128 _mm_movehdup_ps (__m128 a);
+ /// MOVSHDUP xmm, xmm/m128
/// </summary>
public static Vector128<float> MoveHighAndDuplicate(Vector128<float> source) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_moveldup_ps (__m128 a); MOVSLDUP xmm, xmm/m128
+ /// __m128 _mm_moveldup_ps (__m128 a);
+ /// MOVSLDUP xmm, xmm/m128
/// </summary>
public static Vector128<float> MoveLowAndDuplicate(Vector128<float> source) { throw new PlatformNotSupportedException(); }
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.cs
index 264b113772..245d756c77 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse3.cs
@@ -16,29 +16,35 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128 _mm_addsub_ps (__m128 a, __m128 b); ADDSUBPS xmm, xmm/m128
+ /// __m128 _mm_addsub_ps (__m128 a, __m128 b);
+ /// ADDSUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> AddSubtract(Vector128<float> left, Vector128<float> right) => AddSubtract(left, right);
/// <summary>
- /// __m128d _mm_addsub_pd (__m128d a, __m128d b); ADDSUBPD xmm, xmm/m128
+ /// __m128d _mm_addsub_pd (__m128d a, __m128d b);
+ /// ADDSUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> AddSubtract(Vector128<double> left, Vector128<double> right) => AddSubtract(left, right);
/// <summary>
- /// __m128 _mm_hadd_ps (__m128 a, __m128 b); HADDPS xmm, xmm/m128
+ /// __m128 _mm_hadd_ps (__m128 a, __m128 b);
+ /// HADDPS xmm, xmm/m128
/// </summary>
public static Vector128<float> HorizontalAdd(Vector128<float> left, Vector128<float> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m128d _mm_hadd_pd (__m128d a, __m128d b); HADDPD xmm, xmm/m128
+ /// __m128d _mm_hadd_pd (__m128d a, __m128d b);
+ /// HADDPD xmm, xmm/m128
/// </summary>
public static Vector128<double> HorizontalAdd(Vector128<double> left, Vector128<double> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m128 _mm_hsub_ps (__m128 a, __m128 b); HSUBPS xmm, xmm/m128
+ /// __m128 _mm_hsub_ps (__m128 a, __m128 b);
+ /// HSUBPS xmm, xmm/m128
/// </summary>
public static Vector128<float> HorizontalSubtract(Vector128<float> left, Vector128<float> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m128d _mm_hsub_pd (__m128d a, __m128d b); HSUBPD xmm, xmm/m128
+ /// __m128d _mm_hsub_pd (__m128d a, __m128d b);
+ /// HSUBPD xmm, xmm/m128
/// </summary>
public static Vector128<double> HorizontalSubtract(Vector128<double> left, Vector128<double> right) => HorizontalSubtract(left, right);
@@ -48,7 +54,8 @@ namespace System.Runtime.Intrinsics.X86
public static unsafe Vector128<double> LoadAndDuplicateToVector128(double* address) => LoadAndDuplicateToVector128(address);
/// <summary>
- /// __m128i _mm_lddqu_si128 (__m128i const* mem_addr); LDDQU xmm, m128
+ /// __m128i _mm_lddqu_si128 (__m128i const* mem_addr);
+ /// LDDQU xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadDquVector128(sbyte* address) => LoadDquVector128(address);
public static unsafe Vector128<byte> LoadDquVector128(byte* address) => LoadDquVector128(address);
@@ -60,17 +67,20 @@ namespace System.Runtime.Intrinsics.X86
public static unsafe Vector128<ulong> LoadDquVector128(ulong* address) => LoadDquVector128(address);
/// <summary>
- /// __m128d _mm_movedup_pd (__m128d a); MOVDDUP xmm, xmm/m64
+ /// __m128d _mm_movedup_pd (__m128d a);
+ /// MOVDDUP xmm, xmm/m64
/// </summary>
public static Vector128<double> MoveAndDuplicate(Vector128<double> source) => MoveAndDuplicate(source);
/// <summary>
- /// __m128 _mm_movehdup_ps (__m128 a); MOVSHDUP xmm, xmm/m128
+ /// __m128 _mm_movehdup_ps (__m128 a);
+ /// MOVSHDUP xmm, xmm/m128
/// </summary>
public static Vector128<float> MoveHighAndDuplicate(Vector128<float> source) => MoveHighAndDuplicate(source);
/// <summary>
- /// __m128 _mm_moveldup_ps (__m128 a); MOVSLDUP xmm, xmm/m128
+ /// __m128 _mm_moveldup_ps (__m128 a);
+ /// MOVSLDUP xmm, xmm/m128
/// </summary>
public static Vector128<float> MoveLowAndDuplicate(Vector128<float> source) => MoveLowAndDuplicate(source);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.PlatformNotSupported.cs
index 9db5da2377..c0893cb101 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.PlatformNotSupported.cs
@@ -16,286 +16,350 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8); PBLENDW xmm, xmm/m128 imm8
+ /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8);
+ /// PBLENDW xmm, xmm/m128 imm8
/// </summary>
public static Vector128<short> Blend(Vector128<short> left, Vector128<short> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8); PBLENDW xmm, xmm/m128 imm8
+ /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8);
+ /// PBLENDW xmm, xmm/m128 imm8
/// </summary>
public static Vector128<ushort> Blend(Vector128<ushort> left, Vector128<ushort> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8); BLENDPS xmm, xmm/m128, imm8
+ /// __m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8);
+ /// BLENDPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Blend(Vector128<float> left, Vector128<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_blend_pd (__m128d a, __m128d b, const int imm8); BLENDPD xmm, xmm/m128, imm8
+ /// __m128d _mm_blend_pd (__m128d a, __m128d b, const int imm8);
+ /// BLENDPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Blend(Vector128<double> left, Vector128<double> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask); PBLENDVB xmm, xmm/m128, xmm
+ /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask);
+ /// PBLENDVB xmm, xmm/m128, xmm
/// </summary>
public static Vector128<sbyte> BlendVariable(Vector128<sbyte> left, Vector128<sbyte> right, Vector128<sbyte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask); PBLENDVB xmm, xmm/m128, xmm
+ /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask);
+ /// PBLENDVB xmm, xmm/m128, xmm
/// </summary>
public static Vector128<byte> BlendVariable(Vector128<byte> left, Vector128<byte> right, Vector128<byte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_blendv_ps (__m128 a, __m128 b, __m128 mask); BLENDVPS xmm, xmm/m128, xmm0
+ /// __m128 _mm_blendv_ps (__m128 a, __m128 b, __m128 mask);
+ /// BLENDVPS xmm, xmm/m128, xmm0
/// </summary>
public static Vector128<float> BlendVariable(Vector128<float> left, Vector128<float> right, Vector128<float> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_blendv_pd (__m128d a, __m128d b, __m128d mask); BLENDVPD xmm, xmm/m128, xmm0
+ /// __m128d _mm_blendv_pd (__m128d a, __m128d b, __m128d mask);
+ /// BLENDVPD xmm, xmm/m128, xmm0
/// </summary>
public static Vector128<double> BlendVariable(Vector128<double> left, Vector128<double> right, Vector128<double> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_ceil_ps (__m128 a); ROUNDPS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ps (__m128 a);
+ /// ROUNDPS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> Ceiling(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_ceil_pd (__m128d a); ROUNDPD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_pd (__m128d a);
+ /// ROUNDPD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> Ceiling(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_ceil_sd (__m128d a); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_sd (__m128d a);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> CeilingScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_ceil_ss (__m128 a); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ss (__m128 a);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> CeilingScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_ceil_sd (__m128d a, __m128d b); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_sd (__m128d a, __m128d b);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> CeilingScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_ceil_ss (__m128 a, __m128 b); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ss (__m128 a, __m128 b);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> CeilingScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b); PCMPEQQ xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b);
+ /// PCMPEQQ xmm, xmm/m128
/// </summary>
public static Vector128<long> CompareEqual(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b); PCMPEQQ xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b);
+ /// PCMPEQQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> CompareEqual(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi8_epi16 (__m128i a); PMOVSXBW xmm, xmm/m64
+ /// __m128i _mm_cvtepi8_epi16 (__m128i a);
+ /// PMOVSXBW xmm, xmm/m64
/// </summary>
public static Vector128<short> ConvertToVector128Int16(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu8_epi16 (__m128i a); PMOVZXBW xmm, xmm/m64
+ /// __m128i _mm_cvtepu8_epi16 (__m128i a);
+ /// PMOVZXBW xmm, xmm/m64
/// </summary>
public static Vector128<short> ConvertToVector128Int16(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi8_epi32 (__m128i a); PMOVSXBD xmm, xmm/m32
+ /// __m128i _mm_cvtepi8_epi32 (__m128i a);
+ /// PMOVSXBD xmm, xmm/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu8_epi32 (__m128i a); PMOVZXBD xmm, xmm/m32
+ /// __m128i _mm_cvtepu8_epi32 (__m128i a);
+ /// PMOVZXBD xmm, xmm/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi16_epi32 (__m128i a); PMOVSXWD xmm, xmm/m64
+ /// __m128i _mm_cvtepi16_epi32 (__m128i a);
+ /// PMOVSXWD xmm, xmm/m64
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu16_epi32 (__m128i a); PMOVZXWD xmm, xmm/m64
+ /// __m128i _mm_cvtepu16_epi32 (__m128i a);
+ /// PMOVZXWD xmm, xmm/m64
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<ushort> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi8_epi64 (__m128i a); PMOVSXBQ xmm, xmm/m16
+ /// __m128i _mm_cvtepi8_epi64 (__m128i a);
+ /// PMOVSXBQ xmm, xmm/m16
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu8_epi64 (__m128i a); PMOVZXBQ xmm, xmm/m16
+ /// __m128i _mm_cvtepu8_epi64 (__m128i a);
+ /// PMOVZXBQ xmm, xmm/m16
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi16_epi64 (__m128i a); PMOVSXWQ xmm, xmm/m32
+ /// __m128i _mm_cvtepi16_epi64 (__m128i a);
+ /// PMOVSXWQ xmm, xmm/m32
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu16_epi64 (__m128i a); PMOVZXWQ xmm, xmm/m32
+ /// __m128i _mm_cvtepu16_epi64 (__m128i a);
+ /// PMOVZXWQ xmm, xmm/m32
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<ushort> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepi32_epi64 (__m128i a); PMOVSXDQ xmm, xmm/m64
+ /// __m128i _mm_cvtepi32_epi64 (__m128i a);
+ /// PMOVSXDQ xmm, xmm/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cvtepu32_epi64 (__m128i a); PMOVZXDQ xmm, xmm/m64
+ /// __m128i _mm_cvtepu32_epi64 (__m128i a);
+ /// PMOVZXDQ xmm, xmm/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<uint> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_dp_ps (__m128 a, __m128 b, const int imm8); DPPS xmm, xmm/m128, imm8
+ /// __m128 _mm_dp_ps (__m128 a, __m128 b, const int imm8);
+ /// DPPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> DotProduct(Vector128<float> left, Vector128<float> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_dp_pd (__m128d a, __m128d b, const int imm8); DPPD xmm, xmm/m128, imm8
+ /// __m128d _mm_dp_pd (__m128d a, __m128d b, const int imm8);
+ /// DPPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> DotProduct(Vector128<double> left, Vector128<double> right, byte control) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi8 (__m128i a, const int imm8); PEXTRB reg/m8, xmm, imm8
+ /// int _mm_extract_epi8 (__m128i a, const int imm8);
+ /// PEXTRB reg/m8, xmm, imm8
/// </summary>
public static sbyte Extract(Vector128<sbyte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi8 (__m128i a, const int imm8); PEXTRB reg/m8, xmm, imm8
+ /// int _mm_extract_epi8 (__m128i a, const int imm8);
+ /// PEXTRB reg/m8, xmm, imm8
/// </summary>
public static byte Extract(Vector128<byte> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi32 (__m128i a, const int imm8); PEXTRD reg/m32, xmm, imm8
+ /// int _mm_extract_epi32 (__m128i a, const int imm8);
+ /// PEXTRD reg/m32, xmm, imm8
/// </summary>
public static int Extract(Vector128<int> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_epi32 (__m128i a, const int imm8); PEXTRD reg/m32, xmm, imm8
+ /// int _mm_extract_epi32 (__m128i a, const int imm8);
+ /// PEXTRD reg/m32, xmm, imm8
/// </summary>
public static uint Extract(Vector128<uint> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_extract_epi64 (__m128i a, const int imm8); PEXTRQ reg/m64, xmm, imm8
+ /// __int64 _mm_extract_epi64 (__m128i a, const int imm8);
+ /// PEXTRQ reg/m64, xmm, imm8
/// </summary>
public static long Extract(Vector128<long> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __int64 _mm_extract_epi64 (__m128i a, const int imm8); PEXTRQ reg/m64, xmm, imm8
+ /// __int64 _mm_extract_epi64 (__m128i a, const int imm8);
+ /// PEXTRQ reg/m64, xmm, imm8
/// </summary>
public static ulong Extract(Vector128<ulong> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_extract_ps (__m128 a, const int imm8); EXTRACTPS xmm, xmm/m32, imm8
+ /// int _mm_extract_ps (__m128 a, const int imm8);
+ /// EXTRACTPS reg/m32, xmm, imm8
/// </summary>
public static float Extract(Vector128<float> value, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_floor_ps (__m128 a); ROUNDPS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ps (__m128 a);
+ /// ROUNDPS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> Floor(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_floor_pd (__m128d a); ROUNDPD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_pd (__m128d a);
+ /// ROUNDPD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> Floor(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_floor_sd (__m128d a); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_sd (__m128d a);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> FloorScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_floor_ss (__m128 a); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ss (__m128 a);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> FloorScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_floor_sd (__m128d a, __m128d b); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_sd (__m128d a, __m128d b);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> FloorScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_floor_ss (__m128 a, __m128 b); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ss (__m128 a, __m128 b);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> FloorScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8); PINSRB xmm, reg/m8, imm8
+ /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8);
+ /// PINSRB xmm, reg/m8, imm8
/// </summary>
public static Vector128<sbyte> Insert(Vector128<sbyte> value, sbyte data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8); PINSRB xmm, reg/m8, imm8
+ /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8);
+ /// PINSRB xmm, reg/m8, imm8
/// </summary>
public static Vector128<byte> Insert(Vector128<byte> value, byte data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8); PINSRD xmm, reg/m32, imm8
+ /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8);
+ /// PINSRD xmm, reg/m32, imm8
/// </summary>
public static Vector128<int> Insert(Vector128<int> value, int data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8); PINSRD xmm, reg/m32, imm8
+ /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8);
+ /// PINSRD xmm, reg/m32, imm8
/// </summary>
public static Vector128<uint> Insert(Vector128<uint> value, uint data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8); PINSRQ xmm, reg/m64, imm8
+ /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8);
+ /// PINSRQ xmm, reg/m64, imm8
/// </summary>
public static Vector128<long> Insert(Vector128<long> value, long data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8); PINSRQ xmm, reg/m64, imm8
+ /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8);
+ /// PINSRQ xmm, reg/m64, imm8
/// </summary>
public static Vector128<ulong> Insert(Vector128<ulong> value, ulong data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_insert_ps (__m128 a, __m128 b, const int imm8); INSERTPS xmm, xmm/m32, imm8
+ /// __m128 _mm_insert_ps (__m128 a, __m128 b, const int imm8);
+ /// INSERTPS xmm, xmm/m32, imm8
/// </summary>
public static Vector128<float> Insert(Vector128<float> value, float data, byte index) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epi8 (__m128i a, __m128i b); PMAXSB xmm, xmm/m128
+ /// __m128i _mm_max_epi8 (__m128i a, __m128i b);
+ /// PMAXSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Max(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epu16 (__m128i a, __m128i b); PMAXUW xmm, xmm/m128
+ /// __m128i _mm_max_epu16 (__m128i a, __m128i b);
+ /// PMAXUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Max(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epi32 (__m128i a, __m128i b); PMAXSD xmm, xmm/m128
+ /// __m128i _mm_max_epi32 (__m128i a, __m128i b);
+ /// PMAXSD xmm, xmm/m128
/// </summary>
public static Vector128<int> Max(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_max_epu32 (__m128i a, __m128i b); PMAXUD xmm, xmm/m128
+ /// __m128i _mm_max_epu32 (__m128i a, __m128i b);
+ /// PMAXUD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Max(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epi8 (__m128i a, __m128i b); PMINSB xmm, xmm/m128
+ /// __m128i _mm_min_epi8 (__m128i a, __m128i b);
+ /// PMINSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Min(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epu16 (__m128i a, __m128i b); PMINUW xmm, xmm/m128
+ /// __m128i _mm_min_epu16 (__m128i a, __m128i b);
+ /// PMINUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Min(Vector128<ushort> left, Vector128<ushort> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epi32 (__m128i a, __m128i b); PMINSD xmm, xmm/m128
+ /// __m128i _mm_min_epi32 (__m128i a, __m128i b);
+ /// PMINSD xmm, xmm/m128
/// </summary>
public static Vector128<int> Min(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_min_epu32 (__m128i a, __m128i b); PMINUD xmm, xmm/m128
+ /// __m128i _mm_min_epu32 (__m128i a, __m128i b);
+ /// PMINUD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Min(Vector128<uint> left, Vector128<uint> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_minpos_epu16 (__m128i a); PHMINPOSUW xmm, xmm/m128
+ /// __m128i _mm_minpos_epu16 (__m128i a);
+ /// PHMINPOSUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> MinHorizontal(Vector128<ushort> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mpsadbw_epu8 (__m128i a, __m128i b, const int imm8); MPSADBW xmm, xmm/m128, imm8
+ /// __m128i _mm_mpsadbw_epu8 (__m128i a, __m128i b, const int imm8);
+ /// MPSADBW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> MultipleSumAbsoluteDifferences(Vector128<byte> left, Vector128<byte> right, byte mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mul_epi32 (__m128i a, __m128i b); PMULDQ xmm, xmm/m128
+ /// __m128i _mm_mul_epi32 (__m128i a, __m128i b);
+ /// PMULDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Multiply(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mullo_epi32 (__m128i a, __m128i b); PMULLD xmm, xmm/m128
+ /// __m128i _mm_mullo_epi32 (__m128i a, __m128i b);
+ /// PMULLD xmm, xmm/m128
/// </summary>
public static Vector128<int> MultiplyLow(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_packus_epi32 (__m128i a, __m128i b); PACKUSDW xmm, xmm/m128
+ /// __m128i _mm_packus_epi32 (__m128i a, __m128i b);
+ /// PACKUSDW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> PackUnsignedSaturate(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ps (__m128 a, int rounding); ROUNDPS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ps (__m128 a, int rounding);
+ /// ROUNDPS xmm, xmm/m128, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector128<float> RoundToNearestInteger(Vector128<float> value) { throw new PlatformNotSupportedException(); }
@@ -317,7 +381,8 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<float> RoundCurrentDirection(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_pd (__m128d a, int rounding); ROUNDPD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_pd (__m128d a, int rounding);
+ /// ROUNDPD xmm, xmm/m128, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector128<double> RoundToNearestInteger(Vector128<double> value) { throw new PlatformNotSupportedException(); }
@@ -339,134 +404,163 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<double> RoundCurrentDirection(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_CUR_DIRECTION); ROUNDSD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSD xmm, xmm/m128, imm8(4)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundCurrentDirectionScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(8)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToNearestIntegerScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToNegativeInfinityScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToPositiveInfinityScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(11)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(11)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToZeroScalar(Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_CUR_DIRECTION); ROUNDSD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSD xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<double> RoundCurrentDirectionScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(8)
/// </summary>
public static Vector128<double> RoundToNearestIntegerScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> RoundToNegativeInfinityScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> RoundToPositiveInfinityScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(11)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(11)
/// </summary>
public static Vector128<double> RoundToZeroScalar(Vector128<double> upper, Vector128<double> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_CUR_DIRECTION); ROUNDSS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSS xmm, xmm/m128, imm8(4)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundCurrentDirectionScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(8)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToNearestIntegerScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToNegativeInfinityScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToPositiveInfinityScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(11)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(11)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToZeroScalar(Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_CUR_DIRECTION); ROUNDSS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSS xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<float> RoundCurrentDirectionScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(8)
/// </summary>
public static Vector128<float> RoundToNearestIntegerScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> RoundToNegativeInfinityScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> RoundToPositiveInfinityScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(11)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(11)
/// </summary>
public static Vector128<float> RoundToZeroScalar(Vector128<float> upper, Vector128<float> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadAlignedVector128NonTemporal(sbyte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadAlignedVector128NonTemporal(byte* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadAlignedVector128NonTemporal(short* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadAlignedVector128NonTemporal(ushort* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadAlignedVector128NonTemporal(int* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadAlignedVector128NonTemporal(uint* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadAlignedVector128NonTemporal(long* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadAlignedVector128NonTemporal(ulong* address) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_test_all_ones (__m128i a); HELPER
+ /// int _mm_test_all_ones (__m128i a);
+ /// HELPER
/// </summary>
public static bool TestAllOnes(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
public static bool TestAllOnes(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
@@ -478,7 +572,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestAllOnes(Vector128<ulong> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_test_all_zeros (__m128i a, __m128i mask); PTEST xmm, xmm/m128
+ /// int _mm_test_all_zeros (__m128i a, __m128i mask);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestAllZeros(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
public static bool TestAllZeros(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
@@ -490,7 +585,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestAllZeros(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testc_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testc_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
public static bool TestC(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
@@ -502,7 +598,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestC(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_test_mix_ones_zeros (__m128i a, __m128i mask); PTEST xmm, xmm/m128
+ /// int _mm_test_mix_ones_zeros (__m128i a, __m128i mask);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestMixOnesZeros(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
public static bool TestMixOnesZeros(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
@@ -514,7 +611,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestMixOnesZeros(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testnzc_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testnzc_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
public static bool TestNotZAndNotC(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
@@ -526,7 +624,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestNotZAndNotC(Vector128<ulong> left, Vector128<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_testz_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testz_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
public static bool TestZ(Vector128<byte> left, Vector128<byte> right) { throw new PlatformNotSupportedException(); }
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.cs
index b7e463c4fb..1672adb083 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse41.cs
@@ -16,286 +16,350 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8); PBLENDW xmm, xmm/m128 imm8
+ /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8);
+ /// PBLENDW xmm, xmm/m128 imm8
/// </summary>
public static Vector128<short> Blend(Vector128<short> left, Vector128<short> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8); PBLENDW xmm, xmm/m128 imm8
+ /// __m128i _mm_blend_epi16 (__m128i a, __m128i b, const int imm8);
+ /// PBLENDW xmm, xmm/m128 imm8
/// </summary>
public static Vector128<ushort> Blend(Vector128<ushort> left, Vector128<ushort> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8); BLENDPS xmm, xmm/m128, imm8
+ /// __m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8);
+ /// BLENDPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> Blend(Vector128<float> left, Vector128<float> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m128d _mm_blend_pd (__m128d a, __m128d b, const int imm8); BLENDPD xmm, xmm/m128, imm8
+ /// __m128d _mm_blend_pd (__m128d a, __m128d b, const int imm8);
+ /// BLENDPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> Blend(Vector128<double> left, Vector128<double> right, byte control) => Blend(left, right, control);
/// <summary>
- /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask); PBLENDVB xmm, xmm/m128, xmm
+ /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask);
+ /// PBLENDVB xmm, xmm/m128, xmm
/// </summary>
public static Vector128<sbyte> BlendVariable(Vector128<sbyte> left, Vector128<sbyte> right, Vector128<sbyte> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask); PBLENDVB xmm, xmm/m128, xmm
+ /// __m128i _mm_blendv_epi8 (__m128i a, __m128i b, __m128i mask);
+ /// PBLENDVB xmm, xmm/m128, xmm
/// </summary>
public static Vector128<byte> BlendVariable(Vector128<byte> left, Vector128<byte> right, Vector128<byte> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128 _mm_blendv_ps (__m128 a, __m128 b, __m128 mask); BLENDVPS xmm, xmm/m128, xmm0
+ /// __m128 _mm_blendv_ps (__m128 a, __m128 b, __m128 mask);
+ /// BLENDVPS xmm, xmm/m128, xmm0
/// </summary>
public static Vector128<float> BlendVariable(Vector128<float> left, Vector128<float> right, Vector128<float> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128d _mm_blendv_pd (__m128d a, __m128d b, __m128d mask); BLENDVPD xmm, xmm/m128, xmm0
+ /// __m128d _mm_blendv_pd (__m128d a, __m128d b, __m128d mask);
+ /// BLENDVPD xmm, xmm/m128, xmm0
/// </summary>
public static Vector128<double> BlendVariable(Vector128<double> left, Vector128<double> right, Vector128<double> mask) => BlendVariable(left, right, mask);
/// <summary>
- /// __m128 _mm_ceil_ps (__m128 a); ROUNDPS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ps (__m128 a);
+ /// ROUNDPS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> Ceiling(Vector128<float> value) => Ceiling(value);
/// <summary>
- /// __m128d _mm_ceil_pd (__m128d a); ROUNDPD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_pd (__m128d a);
+ /// ROUNDPD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> Ceiling(Vector128<double> value) => Ceiling(value);
/// <summary>
- /// __m128d _mm_ceil_sd (__m128d a); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_sd (__m128d a);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> CeilingScalar(Vector128<double> value) => CeilingScalar(value);
/// <summary>
- /// __m128 _mm_ceil_ss (__m128 a); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ss (__m128 a);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> CeilingScalar(Vector128<float> value) => CeilingScalar(value);
/// <summary>
- /// __m128d _mm_ceil_sd (__m128d a, __m128d b); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_ceil_sd (__m128d a, __m128d b);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> CeilingScalar(Vector128<double> upper, Vector128<double> value) => CeilingScalar(upper, value);
/// <summary>
- /// __m128 _mm_ceil_ss (__m128 a, __m128 b); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_ceil_ss (__m128 a, __m128 b);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> CeilingScalar(Vector128<float> upper, Vector128<float> value) => CeilingScalar(upper, value);
/// <summary>
- /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b); PCMPEQQ xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b);
+ /// PCMPEQQ xmm, xmm/m128
/// </summary>
public static Vector128<long> CompareEqual(Vector128<long> left, Vector128<long> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b); PCMPEQQ xmm, xmm/m128
+ /// __m128i _mm_cmpeq_epi64 (__m128i a, __m128i b);
+ /// PCMPEQQ xmm, xmm/m128
/// </summary>
public static Vector128<ulong> CompareEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareEqual(left, right);
/// <summary>
- /// __m128i _mm_cvtepi8_epi16 (__m128i a); PMOVSXBW xmm, xmm/m64
+ /// __m128i _mm_cvtepi8_epi16 (__m128i a);
+ /// PMOVSXBW xmm, xmm/m64
/// </summary>
public static Vector128<short> ConvertToVector128Int16(Vector128<sbyte> value) => ConvertToVector128Int16(value);
/// <summary>
- /// __m128i _mm_cvtepu8_epi16 (__m128i a); PMOVZXBW xmm, xmm/m64
+ /// __m128i _mm_cvtepu8_epi16 (__m128i a);
+ /// PMOVZXBW xmm, xmm/m64
/// </summary>
public static Vector128<short> ConvertToVector128Int16(Vector128<byte> value) => ConvertToVector128Int16(value);
/// <summary>
- /// __m128i _mm_cvtepi8_epi32 (__m128i a); PMOVSXBD xmm, xmm/m32
+ /// __m128i _mm_cvtepi8_epi32 (__m128i a);
+ /// PMOVSXBD xmm, xmm/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<sbyte> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128i _mm_cvtepu8_epi32 (__m128i a); PMOVZXBD xmm, xmm/m32
+ /// __m128i _mm_cvtepu8_epi32 (__m128i a);
+ /// PMOVZXBD xmm, xmm/m32
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<byte> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128i _mm_cvtepi16_epi32 (__m128i a); PMOVSXWD xmm, xmm/m64
+ /// __m128i _mm_cvtepi16_epi32 (__m128i a);
+ /// PMOVSXWD xmm, xmm/m64
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<short> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128i _mm_cvtepu16_epi32 (__m128i a); PMOVZXWD xmm, xmm/m64
+ /// __m128i _mm_cvtepu16_epi32 (__m128i a);
+ /// PMOVZXWD xmm, xmm/m64
/// </summary>
public static Vector128<int> ConvertToVector128Int32(Vector128<ushort> value) => ConvertToVector128Int32(value);
/// <summary>
- /// __m128i _mm_cvtepi8_epi64 (__m128i a); PMOVSXBQ xmm, xmm/m16
+ /// __m128i _mm_cvtepi8_epi64 (__m128i a);
+ /// PMOVSXBQ xmm, xmm/m16
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<sbyte> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128i _mm_cvtepu8_epi64 (__m128i a); PMOVZXBQ xmm, xmm/m16
+ /// __m128i _mm_cvtepu8_epi64 (__m128i a);
+ /// PMOVZXBQ xmm, xmm/m16
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<byte> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128i _mm_cvtepi16_epi64 (__m128i a); PMOVSXWQ xmm, xmm/m32
+ /// __m128i _mm_cvtepi16_epi64 (__m128i a);
+ /// PMOVSXWQ xmm, xmm/m32
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<short> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128i _mm_cvtepu16_epi64 (__m128i a); PMOVZXWQ xmm, xmm/m32
+ /// __m128i _mm_cvtepu16_epi64 (__m128i a);
+ /// PMOVZXWQ xmm, xmm/m32
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<ushort> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128i _mm_cvtepi32_epi64 (__m128i a); PMOVSXDQ xmm, xmm/m64
+ /// __m128i _mm_cvtepi32_epi64 (__m128i a);
+ /// PMOVSXDQ xmm, xmm/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<int> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128i _mm_cvtepu32_epi64 (__m128i a); PMOVZXDQ xmm, xmm/m64
+ /// __m128i _mm_cvtepu32_epi64 (__m128i a);
+ /// PMOVZXDQ xmm, xmm/m64
/// </summary>
public static Vector128<long> ConvertToVector128Int64(Vector128<uint> value) => ConvertToVector128Int64(value);
/// <summary>
- /// __m128 _mm_dp_ps (__m128 a, __m128 b, const int imm8); DPPS xmm, xmm/m128, imm8
+ /// __m128 _mm_dp_ps (__m128 a, __m128 b, const int imm8);
+ /// DPPS xmm, xmm/m128, imm8
/// </summary>
public static Vector128<float> DotProduct(Vector128<float> left, Vector128<float> right, byte control) => DotProduct(left, right, control);
/// <summary>
- /// __m128d _mm_dp_pd (__m128d a, __m128d b, const int imm8); DPPD xmm, xmm/m128, imm8
+ /// __m128d _mm_dp_pd (__m128d a, __m128d b, const int imm8);
+ /// DPPD xmm, xmm/m128, imm8
/// </summary>
public static Vector128<double> DotProduct(Vector128<double> left, Vector128<double> right, byte control) => DotProduct(left, right, control);
/// <summary>
- /// int _mm_extract_epi8 (__m128i a, const int imm8); PEXTRB reg/m8, xmm, imm8
+ /// int _mm_extract_epi8 (__m128i a, const int imm8);
+ /// PEXTRB reg/m8, xmm, imm8
/// </summary>
public static sbyte Extract(Vector128<sbyte> value, byte index) => Extract(value, index);
/// <summary>
- /// int _mm_extract_epi8 (__m128i a, const int imm8); PEXTRB reg/m8, xmm, imm8
+ /// int _mm_extract_epi8 (__m128i a, const int imm8);
+ /// PEXTRB reg/m8, xmm, imm8
/// </summary>
public static byte Extract(Vector128<byte> value, byte index) => Extract(value, index);
/// <summary>
- /// int _mm_extract_epi32 (__m128i a, const int imm8); PEXTRD reg/m32, xmm, imm8
+ /// int _mm_extract_epi32 (__m128i a, const int imm8);
+ /// PEXTRD reg/m32, xmm, imm8
/// </summary>
public static int Extract(Vector128<int> value, byte index) => Extract(value, index);
/// <summary>
- /// int _mm_extract_epi32 (__m128i a, const int imm8); PEXTRD reg/m32, xmm, imm8
+ /// int _mm_extract_epi32 (__m128i a, const int imm8);
+ /// PEXTRD reg/m32, xmm, imm8
/// </summary>
public static uint Extract(Vector128<uint> value, byte index) => Extract(value, index);
/// <summary>
- /// __int64 _mm_extract_epi64 (__m128i a, const int imm8); PEXTRQ reg/m64, xmm, imm8
+ /// __int64 _mm_extract_epi64 (__m128i a, const int imm8);
+ /// PEXTRQ reg/m64, xmm, imm8
/// </summary>
public static long Extract(Vector128<long> value, byte index) => Extract(value, index);
/// <summary>
- /// __int64 _mm_extract_epi64 (__m128i a, const int imm8); PEXTRQ reg/m64, xmm, imm8
+ /// __int64 _mm_extract_epi64 (__m128i a, const int imm8);
+ /// PEXTRQ reg/m64, xmm, imm8
/// </summary>
public static ulong Extract(Vector128<ulong> value, byte index) => Extract(value, index);
/// <summary>
- /// int _mm_extract_ps (__m128 a, const int imm8); EXTRACTPS xmm, xmm/m32, imm8
+ /// int _mm_extract_ps (__m128 a, const int imm8);
+ /// EXTRACTPS xmm, xmm/m32, imm8
/// </summary>
public static float Extract(Vector128<float> value, byte index) => Extract(value, index);
/// <summary>
- /// __m128 _mm_floor_ps (__m128 a); ROUNDPS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ps (__m128 a);
+ /// ROUNDPS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> Floor(Vector128<float> value) => Floor(value);
/// <summary>
- /// __m128d _mm_floor_pd (__m128d a); ROUNDPD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_pd (__m128d a);
+ /// ROUNDPD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> Floor(Vector128<double> value) => Floor(value);
/// <summary>
- /// __m128d _mm_floor_sd (__m128d a); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_sd (__m128d a);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> FloorScalar(Vector128<double> value) => FloorScalar(value);
/// <summary>
- /// __m128 _mm_floor_ss (__m128 a); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ss (__m128 a);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> FloorScalar(Vector128<float> value) => FloorScalar(value);
/// <summary>
- /// __m128d _mm_floor_sd (__m128d a, __m128d b); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_floor_sd (__m128d a, __m128d b);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> FloorScalar(Vector128<double> upper, Vector128<double> value) => FloorScalar(upper, value);
/// <summary>
- /// __m128 _mm_floor_ss (__m128 a, __m128 b); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_floor_ss (__m128 a, __m128 b);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> FloorScalar(Vector128<float> upper, Vector128<float> value) => FloorScalar(upper, value);
/// <summary>
- /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8); PINSRB xmm, reg/m8, imm8
+ /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8);
+ /// PINSRB xmm, reg/m8, imm8
/// </summary>
public static Vector128<sbyte> Insert(Vector128<sbyte> value, sbyte data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8); PINSRB xmm, reg/m8, imm8
+ /// __m128i _mm_insert_epi8 (__m128i a, int i, const int imm8);
+ /// PINSRB xmm, reg/m8, imm8
/// </summary>
public static Vector128<byte> Insert(Vector128<byte> value, byte data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8); PINSRD xmm, reg/m32, imm8
+ /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8);
+ /// PINSRD xmm, reg/m32, imm8
/// </summary>
public static Vector128<int> Insert(Vector128<int> value, int data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8); PINSRD xmm, reg/m32, imm8
+ /// __m128i _mm_insert_epi32 (__m128i a, int i, const int imm8);
+ /// PINSRD xmm, reg/m32, imm8
/// </summary>
public static Vector128<uint> Insert(Vector128<uint> value, uint data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8); PINSRQ xmm, reg/m64, imm8
+ /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8);
+ /// PINSRQ xmm, reg/m64, imm8
/// </summary>
public static Vector128<long> Insert(Vector128<long> value, long data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8); PINSRQ xmm, reg/m64, imm8
+ /// __m128i _mm_insert_epi64 (__m128i a, __int64 i, const int imm8);
+ /// PINSRQ xmm, reg/m64, imm8
/// </summary>
public static Vector128<ulong> Insert(Vector128<ulong> value, ulong data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128 _mm_insert_ps (__m128 a, __m128 b, const int imm8); INSERTPS xmm, xmm/m32, imm8
+ /// __m128 _mm_insert_ps (__m128 a, __m128 b, const int imm8);
+ /// INSERTPS xmm, xmm/m32, imm8
/// </summary>
public static Vector128<float> Insert(Vector128<float> value, float data, byte index) => Insert(value, data, index);
/// <summary>
- /// __m128i _mm_max_epi8 (__m128i a, __m128i b); PMAXSB xmm, xmm/m128
+ /// __m128i _mm_max_epi8 (__m128i a, __m128i b);
+ /// PMAXSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Max(Vector128<sbyte> left, Vector128<sbyte> right) => Max(left, right);
/// <summary>
- /// __m128i _mm_max_epu16 (__m128i a, __m128i b); PMAXUW xmm, xmm/m128
+ /// __m128i _mm_max_epu16 (__m128i a, __m128i b);
+ /// PMAXUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Max(Vector128<ushort> left, Vector128<ushort> right) => Max(left, right);
/// <summary>
- /// __m128i _mm_max_epi32 (__m128i a, __m128i b); PMAXSD xmm, xmm/m128
+ /// __m128i _mm_max_epi32 (__m128i a, __m128i b);
+ /// PMAXSD xmm, xmm/m128
/// </summary>
public static Vector128<int> Max(Vector128<int> left, Vector128<int> right) => Max(left, right);
/// <summary>
- /// __m128i _mm_max_epu32 (__m128i a, __m128i b); PMAXUD xmm, xmm/m128
+ /// __m128i _mm_max_epu32 (__m128i a, __m128i b);
+ /// PMAXUD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Max(Vector128<uint> left, Vector128<uint> right) => Max(left, right);
/// <summary>
- /// __m128i _mm_min_epi8 (__m128i a, __m128i b); PMINSB xmm, xmm/m128
+ /// __m128i _mm_min_epi8 (__m128i a, __m128i b);
+ /// PMINSB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Min(Vector128<sbyte> left, Vector128<sbyte> right) => Min(left, right);
/// <summary>
- /// __m128i _mm_min_epu16 (__m128i a, __m128i b); PMINUW xmm, xmm/m128
+ /// __m128i _mm_min_epu16 (__m128i a, __m128i b);
+ /// PMINUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Min(Vector128<ushort> left, Vector128<ushort> right) => Min(left, right);
/// <summary>
- /// __m128i _mm_min_epi32 (__m128i a, __m128i b); PMINSD xmm, xmm/m128
+ /// __m128i _mm_min_epi32 (__m128i a, __m128i b);
+ /// PMINSD xmm, xmm/m128
/// </summary>
public static Vector128<int> Min(Vector128<int> left, Vector128<int> right) => Min(left, right);
/// <summary>
- /// __m128i _mm_min_epu32 (__m128i a, __m128i b); PMINUD xmm, xmm/m128
+ /// __m128i _mm_min_epu32 (__m128i a, __m128i b);
+ /// PMINUD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Min(Vector128<uint> left, Vector128<uint> right) => Min(left, right);
/// <summary>
- /// __m128i _mm_minpos_epu16 (__m128i a); PHMINPOSUW xmm, xmm/m128
+ /// __m128i _mm_minpos_epu16 (__m128i a);
+ /// PHMINPOSUW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> MinHorizontal(Vector128<ushort> value) => MinHorizontal(value);
/// <summary>
- /// __m128i _mm_mpsadbw_epu8 (__m128i a, __m128i b, const int imm8); MPSADBW xmm, xmm/m128, imm8
+ /// __m128i _mm_mpsadbw_epu8 (__m128i a, __m128i b, const int imm8);
+ /// MPSADBW xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> MultipleSumAbsoluteDifferences(Vector128<byte> left, Vector128<byte> right, byte mask) => MultipleSumAbsoluteDifferences(left, right, mask);
/// <summary>
- /// __m128i _mm_mul_epi32 (__m128i a, __m128i b); PMULDQ xmm, xmm/m128
+ /// __m128i _mm_mul_epi32 (__m128i a, __m128i b);
+ /// PMULDQ xmm, xmm/m128
/// </summary>
public static Vector128<long> Multiply(Vector128<int> left, Vector128<int> right) => Multiply(left, right);
/// <summary>
- /// __m128i _mm_mullo_epi32 (__m128i a, __m128i b); PMULLD xmm, xmm/m128
+ /// __m128i _mm_mullo_epi32 (__m128i a, __m128i b);
+ /// PMULLD xmm, xmm/m128
/// </summary>
public static Vector128<int> MultiplyLow(Vector128<int> left, Vector128<int> right) => MultiplyLow(left, right);
/// <summary>
- /// __m128i _mm_packus_epi32 (__m128i a, __m128i b); PACKUSDW xmm, xmm/m128
+ /// __m128i _mm_packus_epi32 (__m128i a, __m128i b);
+ /// PACKUSDW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> PackUnsignedSaturate(Vector128<int> left, Vector128<int> right) => PackUnsignedSaturate(left, right);
/// <summary>
- /// __m128 _mm_round_ps (__m128 a, int rounding); ROUNDPS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ps (__m128 a, int rounding);
+ /// ROUNDPS xmm, xmm/m128, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector128<float> RoundToNearestInteger(Vector128<float> value) => RoundToNearestInteger(value);
@@ -317,7 +381,8 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<float> RoundCurrentDirection(Vector128<float> value) => RoundCurrentDirection(value);
/// <summary>
- /// __m128d _mm_round_pd (__m128d a, int rounding); ROUNDPD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_pd (__m128d a, int rounding);
+ /// ROUNDPD xmm, xmm/m128, imm8(8)
/// _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC
/// </summary>
public static Vector128<double> RoundToNearestInteger(Vector128<double> value) => RoundToNearestInteger(value);
@@ -339,134 +404,163 @@ namespace System.Runtime.Intrinsics.X86
public static Vector128<double> RoundCurrentDirection(Vector128<double> value) => RoundCurrentDirection(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_CUR_DIRECTION); ROUNDSD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSD xmm, xmm/m128, imm8(4)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundCurrentDirectionScalar(Vector128<double> value) => RoundCurrentDirectionScalar(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(8)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToNearestIntegerScalar(Vector128<double> value) => RoundToNearestIntegerScalar(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToNegativeInfinityScalar(Vector128<double> value) => RoundToNegativeInfinityScalar(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToPositiveInfinityScalar(Vector128<double> value) => RoundToPositiveInfinityScalar(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(11)
+ /// __m128d _mm_round_sd (__m128d a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(11)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<double> RoundToZeroScalar(Vector128<double> value) => RoundToZeroScalar(value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_CUR_DIRECTION); ROUNDSD xmm, xmm/m128, imm8(4)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSD xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<double> RoundCurrentDirectionScalar(Vector128<double> upper, Vector128<double> value) => RoundCurrentDirectionScalar(upper, value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(8)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(8)
/// </summary>
public static Vector128<double> RoundToNearestIntegerScalar(Vector128<double> upper, Vector128<double> value) => RoundToNearestIntegerScalar(upper, value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(9)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<double> RoundToNegativeInfinityScalar(Vector128<double> upper, Vector128<double> value) => RoundToNegativeInfinityScalar(upper, value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(10)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<double> RoundToPositiveInfinityScalar(Vector128<double> upper, Vector128<double> value) => RoundToPositiveInfinityScalar(upper, value);
/// <summary>
- /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSD xmm, xmm/m128, imm8(11)
+ /// __m128d _mm_round_sd (__m128d a, __m128d b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSD xmm, xmm/m128, imm8(11)
/// </summary>
public static Vector128<double> RoundToZeroScalar(Vector128<double> upper, Vector128<double> value) => RoundToZeroScalar(upper, value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_CUR_DIRECTION); ROUNDSS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSS xmm, xmm/m128, imm8(4)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundCurrentDirectionScalar(Vector128<float> value) => RoundCurrentDirectionScalar(value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(8)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToNearestIntegerScalar(Vector128<float> value) => RoundToNearestIntegerScalar(value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToNegativeInfinityScalar(Vector128<float> value) => RoundToNegativeInfinityScalar(value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToPositiveInfinityScalar(Vector128<float> value) => RoundToPositiveInfinityScalar(value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(11)
+ /// __m128 _mm_round_ss (__m128 a, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(11)
/// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
/// </summary>
public static Vector128<float> RoundToZeroScalar(Vector128<float> value) => RoundToZeroScalar(value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_CUR_DIRECTION); ROUNDSS xmm, xmm/m128, imm8(4)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_CUR_DIRECTION);
+ /// ROUNDSS xmm, xmm/m128, imm8(4)
/// </summary>
public static Vector128<float> RoundCurrentDirectionScalar(Vector128<float> upper, Vector128<float> value) => RoundCurrentDirectionScalar(upper, value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(8)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(8)
/// </summary>
public static Vector128<float> RoundToNearestIntegerScalar(Vector128<float> upper, Vector128<float> value) => RoundToNearestIntegerScalar(upper, value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(9)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(9)
/// </summary>
public static Vector128<float> RoundToNegativeInfinityScalar(Vector128<float> upper, Vector128<float> value) => RoundToNegativeInfinityScalar(upper, value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(10)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(10)
/// </summary>
public static Vector128<float> RoundToPositiveInfinityScalar(Vector128<float> upper, Vector128<float> value) => RoundToPositiveInfinityScalar(upper, value);
/// <summary>
- /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); ROUNDSS xmm, xmm/m128, imm8(11)
+ /// __m128 _mm_round_ss (__m128 a, __m128 b, _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
+ /// ROUNDSS xmm, xmm/m128, imm8(11)
/// </summary>
public static Vector128<float> RoundToZeroScalar(Vector128<float> upper, Vector128<float> value) => RoundToZeroScalar(upper, value);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<sbyte> LoadAlignedVector128NonTemporal(sbyte* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<byte> LoadAlignedVector128NonTemporal(byte* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<short> LoadAlignedVector128NonTemporal(short* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<ushort> LoadAlignedVector128NonTemporal(ushort* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<int> LoadAlignedVector128NonTemporal(int* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<uint> LoadAlignedVector128NonTemporal(uint* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<long> LoadAlignedVector128NonTemporal(long* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr); MOVNTDQA xmm, m128
+ /// __m128i _mm_stream_load_si128 (const __m128i* mem_addr);
+ /// MOVNTDQA xmm, m128
/// </summary>
public static unsafe Vector128<ulong> LoadAlignedVector128NonTemporal(ulong* address) => LoadAlignedVector128NonTemporal(address);
/// <summary>
- /// int _mm_test_all_ones (__m128i a); HELPER
+ /// int _mm_test_all_ones (__m128i a);
+ /// HELPER
/// </summary>
public static bool TestAllOnes(Vector128<sbyte> value) => TestAllOnes(value);
public static bool TestAllOnes(Vector128<byte> value) => TestAllOnes(value);
@@ -478,7 +572,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestAllOnes(Vector128<ulong> value) => TestAllOnes(value);
/// <summary>
- /// int _mm_test_all_zeros (__m128i a, __m128i mask); PTEST xmm, xmm/m128
+ /// int _mm_test_all_zeros (__m128i a, __m128i mask);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestAllZeros(Vector128<sbyte> left, Vector128<sbyte> right) => TestAllZeros(left, right);
public static bool TestAllZeros(Vector128<byte> left, Vector128<byte> right) => TestAllZeros(left, right);
@@ -490,7 +585,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestAllZeros(Vector128<ulong> left, Vector128<ulong> right) => TestAllZeros(left, right);
/// <summary>
- /// int _mm_testc_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testc_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestC(Vector128<sbyte> left, Vector128<sbyte> right) => TestC(left, right);
public static bool TestC(Vector128<byte> left, Vector128<byte> right) => TestC(left, right);
@@ -502,7 +598,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestC(Vector128<ulong> left, Vector128<ulong> right) => TestC(left, right);
/// <summary>
- /// int _mm_test_mix_ones_zeros (__m128i a, __m128i mask); PTEST xmm, xmm/m128
+ /// int _mm_test_mix_ones_zeros (__m128i a, __m128i mask);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestMixOnesZeros(Vector128<sbyte> left, Vector128<sbyte> right) => TestMixOnesZeros(left, right);
public static bool TestMixOnesZeros(Vector128<byte> left, Vector128<byte> right) => TestMixOnesZeros(left, right);
@@ -514,7 +611,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestMixOnesZeros(Vector128<ulong> left, Vector128<ulong> right) => TestMixOnesZeros(left, right);
/// <summary>
- /// int _mm_testnzc_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testnzc_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestNotZAndNotC(Vector128<sbyte> left, Vector128<sbyte> right) => TestNotZAndNotC(left, right);
public static bool TestNotZAndNotC(Vector128<byte> left, Vector128<byte> right) => TestNotZAndNotC(left, right);
@@ -526,7 +624,8 @@ namespace System.Runtime.Intrinsics.X86
public static bool TestNotZAndNotC(Vector128<ulong> left, Vector128<ulong> right) => TestNotZAndNotC(left, right);
/// <summary>
- /// int _mm_testz_si128 (__m128i a, __m128i b); PTEST xmm, xmm/m128
+ /// int _mm_testz_si128 (__m128i a, __m128i b);
+ /// PTEST xmm, xmm/m128
/// </summary>
public static bool TestZ(Vector128<sbyte> left, Vector128<sbyte> right) => TestZ(left, right);
public static bool TestZ(Vector128<byte> left, Vector128<byte> right) => TestZ(left, right);
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.PlatformNotSupported.cs
index 887dca9b01..7dbb2954e4 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.PlatformNotSupported.cs
@@ -16,217 +16,286 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<sbyte> left, Vector128<sbyte> right, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<byte> left, Vector128<byte> right, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<short> left, Vector128<short> right, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<ushort> left, Vector128<ushort> right, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthBitMask(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthBitMask(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthBitMask(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthBitMask(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthUnitMask(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthUnitMask(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthUnitMask(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthUnitMask(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthBitMask(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthBitMask(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthBitMask(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthBitMask(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthUnitMask(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthUnitMask(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthUnitMask(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthUnitMask(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_cmpgt_epi64 (__m128i a, __m128i b); PCMPGTQ xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi64 (__m128i a, __m128i b);
+ /// PCMPGTQ xmm, xmm/m128
/// </summary>
public static Vector128<long> CompareGreaterThan(Vector128<long> left, Vector128<long> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// unsigned int _mm_crc32_u8 (unsigned int crc, unsigned char v); CRC32 reg, reg/m8
+ /// unsigned int _mm_crc32_u8 (unsigned int crc, unsigned char v);
+ /// CRC32 reg, reg/m8
/// </summary>
public static uint Crc32(uint crc, byte data) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// unsigned int _mm_crc32_u16 (unsigned int crc, unsigned short v); CRC32 reg, reg/m16
+ /// unsigned int _mm_crc32_u16 (unsigned int crc, unsigned short v);
+ /// CRC32 reg, reg/m16
/// </summary>
public static uint Crc32(uint crc, ushort data) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// unsigned int _mm_crc32_u32 (unsigned int crc, unsigned int v); CRC32 reg, reg/m32
+ /// unsigned int _mm_crc32_u32 (unsigned int crc, unsigned int v);
+ /// CRC32 reg, reg/m32
/// </summary>
public static uint Crc32(uint crc, uint data) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// unsigned __int64 _mm_crc32_u64 (unsigned __int64 crc, unsigned __int64 v); CRC32 reg, reg/m64
+ /// unsigned __int64 _mm_crc32_u64 (unsigned __int64 crc, unsigned __int64 v);
+ /// CRC32 reg, reg/m64
/// </summary>
public static ulong Crc32(ulong crc, ulong data) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.cs
index 5620d239f7..1b626184e0 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Sse42.cs
@@ -16,217 +16,286 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<sbyte> left, Vector128<sbyte> right, ResultsFlag flag, StringComparisonMode mode) => CompareImplicitLength(left, right, flag, mode);
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<byte> left, Vector128<byte> right, ResultsFlag flag, StringComparisonMode mode) => CompareImplicitLength(left, right, flag, mode);
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<short> left, Vector128<short> right, ResultsFlag flag, StringComparisonMode mode) => CompareImplicitLength(left, right, flag, mode);
/// <summary>
- /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
- /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistra (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrc (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistro (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrs (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistrz (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareImplicitLength(Vector128<ushort> left, Vector128<ushort> right, ResultsFlag flag, StringComparisonMode mode) => CompareImplicitLength(left, right, flag, mode);
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) => CompareExplicitLength(left, leftLength, right, rightLength, flag, mode);
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) => CompareExplicitLength(left, leftLength, right, rightLength, flag, mode);
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) => CompareExplicitLength(left, leftLength, right, rightLength, flag, mode);
/// <summary>
- /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
- /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestra (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrc (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestro (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrs (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestrz (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static bool CompareExplicitLength(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, ResultsFlag flag, StringComparisonMode mode) => CompareExplicitLength(left, leftLength, right, rightLength, flag, mode);
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) => CompareImplicitLengthIndex(left, right, mode);
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) => CompareImplicitLengthIndex(left, right, mode);
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) => CompareImplicitLengthIndex(left, right, mode);
/// <summary>
- /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8); PCMPISTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpistri (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareImplicitLengthIndex(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) => CompareImplicitLengthIndex(left, right, mode);
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthIndex(left, leftLength, right, rightLength, mode);
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthIndex(left, leftLength, right, rightLength, mode);
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthIndex(left, leftLength, right, rightLength, mode);
/// <summary>
- /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRI xmm, xmm/m128, imm8
+ /// int _mm_cmpestri (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRI xmm, xmm/m128, imm8
/// </summary>
public static int CompareExplicitLengthIndex(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthIndex(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthBitMask(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) => CompareImplicitLengthBitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthBitMask(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) => CompareImplicitLengthBitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthBitMask(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) => CompareImplicitLengthBitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthBitMask(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) => CompareImplicitLengthBitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthUnitMask(Vector128<sbyte> left, Vector128<sbyte> right, StringComparisonMode mode) => CompareImplicitLengthUnitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareImplicitLengthUnitMask(Vector128<byte> left, Vector128<byte> right, StringComparisonMode mode) => CompareImplicitLengthUnitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthUnitMask(Vector128<short> left, Vector128<short> right, StringComparisonMode mode) => CompareImplicitLengthUnitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8); PCMPISTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpistrm (__m128i a, __m128i b, const int imm8);
+ /// PCMPISTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareImplicitLengthUnitMask(Vector128<ushort> left, Vector128<ushort> right, StringComparisonMode mode) => CompareImplicitLengthUnitMask(left, right, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthBitMask(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthBitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthBitMask(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthBitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthBitMask(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthBitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthBitMask(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthBitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthUnitMask(Vector128<sbyte> left, byte leftLength, Vector128<sbyte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthUnitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<byte> CompareExplicitLengthUnitMask(Vector128<byte> left, byte leftLength, Vector128<byte> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthUnitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthUnitMask(Vector128<short> left, byte leftLength, Vector128<short> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthUnitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8); PCMPESTRM xmm, xmm/m128, imm8
+ /// __m128i _mm_cmpestrm (__m128i a, int la, __m128i b, int lb, const int imm8);
+ /// PCMPESTRM xmm, xmm/m128, imm8
/// </summary>
public static Vector128<ushort> CompareExplicitLengthUnitMask(Vector128<ushort> left, byte leftLength, Vector128<ushort> right, byte rightLength, StringComparisonMode mode) => CompareExplicitLengthUnitMask(left, leftLength, right, rightLength, mode);
/// <summary>
- /// __m128i _mm_cmpgt_epi64 (__m128i a, __m128i b); PCMPGTQ xmm, xmm/m128
+ /// __m128i _mm_cmpgt_epi64 (__m128i a, __m128i b);
+ /// PCMPGTQ xmm, xmm/m128
/// </summary>
public static Vector128<long> CompareGreaterThan(Vector128<long> left, Vector128<long> right) => CompareGreaterThan(left, right);
/// <summary>
- /// unsigned int _mm_crc32_u8 (unsigned int crc, unsigned char v); CRC32 reg, reg/m8
+ /// unsigned int _mm_crc32_u8 (unsigned int crc, unsigned char v);
+ /// CRC32 reg, reg/m8
/// </summary>
public static uint Crc32(uint crc, byte data) => Crc32(crc, data);
/// <summary>
- /// unsigned int _mm_crc32_u16 (unsigned int crc, unsigned short v); CRC32 reg, reg/m16
+ /// unsigned int _mm_crc32_u16 (unsigned int crc, unsigned short v);
+ /// CRC32 reg, reg/m16
/// </summary>
public static uint Crc32(uint crc, ushort data) => Crc32(crc, data);
/// <summary>
- /// unsigned int _mm_crc32_u32 (unsigned int crc, unsigned int v); CRC32 reg, reg/m32
+ /// unsigned int _mm_crc32_u32 (unsigned int crc, unsigned int v);
+ /// CRC32 reg, reg/m32
/// </summary>
public static uint Crc32(uint crc, uint data) => Crc32(crc, data);
/// <summary>
- /// unsigned __int64 _mm_crc32_u64 (unsigned __int64 crc, unsigned __int64 v); CRC32 reg, reg/m64
+ /// unsigned __int64 _mm_crc32_u64 (unsigned __int64 crc, unsigned __int64 v);
+ /// CRC32 reg, reg/m64
/// </summary>
public static ulong Crc32(ulong crc, ulong data) => Crc32(crc, data);
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.PlatformNotSupported.cs
index 5f82815ba1..0f492c8eb2 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.PlatformNotSupported.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.PlatformNotSupported.cs
@@ -16,76 +16,92 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get { return false; } }
/// <summary>
- /// __m128i _mm_abs_epi8 (__m128i a); PABSB xmm, xmm/m128
+ /// __m128i _mm_abs_epi8 (__m128i a);
+ /// PABSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Abs(Vector128<sbyte> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_abs_epi16 (__m128i a); PABSW xmm, xmm/m128
+ /// __m128i _mm_abs_epi16 (__m128i a);
+ /// PABSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Abs(Vector128<short> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_abs_epi32 (__m128i a); PABSD xmm, xmm/m128
+ /// __m128i _mm_abs_epi32 (__m128i a);
+ /// PABSD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Abs(Vector128<int> value) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_alignr_epi8 (__m128i a, __m128i b, int count); PALIGNR xmm, xmm/m128, imm8
+ /// __m128i _mm_alignr_epi8 (__m128i a, __m128i b, int count);
+ /// PALIGNR xmm, xmm/m128, imm8
/// </summary>
public static Vector128<sbyte> AlignRight(Vector128<sbyte> left, Vector128<sbyte> right, byte mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hadd_epi16 (__m128i a, __m128i b); PHADDW xmm, xmm/m128
+ /// __m128i _mm_hadd_epi16 (__m128i a, __m128i b);
+ /// PHADDW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalAdd(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hadd_epi32 (__m128i a, __m128i b); PHADDD xmm, xmm/m128
+ /// __m128i _mm_hadd_epi32 (__m128i a, __m128i b);
+ /// PHADDD xmm, xmm/m128
/// </summary>
public static Vector128<int> HorizontalAdd(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hadds_epi16 (__m128i a, __m128i b); PHADDSW xmm, xmm/m128
+ /// __m128i _mm_hadds_epi16 (__m128i a, __m128i b);
+ /// PHADDSW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalAddSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hsub_epi16 (__m128i a, __m128i b); PHSUBW xmm, xmm/m128
+ /// __m128i _mm_hsub_epi16 (__m128i a, __m128i b);
+ /// PHSUBW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalSubtract(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hsub_epi32 (__m128i a, __m128i b); PHSUBD xmm, xmm/m128
+ /// __m128i _mm_hsub_epi32 (__m128i a, __m128i b);
+ /// PHSUBD xmm, xmm/m128
/// </summary>
public static Vector128<int> HorizontalSubtract(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_hsubs_epi16 (__m128i a, __m128i b); PHSUBSW xmm, xmm/m128
+ /// __m128i _mm_hsubs_epi16 (__m128i a, __m128i b);
+ /// PHSUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalSubtractSaturate(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_maddubs_epi16 (__m128i a, __m128i b); PMADDUBSW xmm, xmm/m128
+ /// __m128i _mm_maddubs_epi16 (__m128i a, __m128i b);
+ /// PMADDUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyAddAdjacent(Vector128<byte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_mulhrs_epi16 (__m128i a, __m128i b); PMULHRSW xmm, xmm/m128
+ /// __m128i _mm_mulhrs_epi16 (__m128i a, __m128i b);
+ /// PMULHRSW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyHighRoundScale(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_shuffle_epi8 (__m128i a, __m128i b); PSHUFB xmm, xmm/m128
+ /// __m128i _mm_shuffle_epi8 (__m128i a, __m128i b);
+ /// PSHUFB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Shuffle(Vector128<sbyte> value, Vector128<sbyte> mask) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sign_epi8 (__m128i a, __m128i b); PSIGNB xmm, xmm/m128
+ /// __m128i _mm_sign_epi8 (__m128i a, __m128i b);
+ /// PSIGNB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Sign(Vector128<sbyte> left, Vector128<sbyte> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sign_epi16 (__m128i a, __m128i b); PSIGNW xmm, xmm/m128
+ /// __m128i _mm_sign_epi16 (__m128i a, __m128i b);
+ /// PSIGNW xmm, xmm/m128
/// </summary>
public static Vector128<short> Sign(Vector128<short> left, Vector128<short> right) { throw new PlatformNotSupportedException(); }
/// <summary>
- /// __m128i _mm_sign_epi32 (__m128i a, __m128i b); PSIGND xmm, xmm/m128
+ /// __m128i _mm_sign_epi32 (__m128i a, __m128i b);
+ /// PSIGND xmm, xmm/m128
/// </summary>
public static Vector128<int> Sign(Vector128<int> left, Vector128<int> right) { throw new PlatformNotSupportedException(); }
}
diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.cs
index 6ac1aa10d4..bfe40518c0 100644
--- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.cs
+++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Ssse3.cs
@@ -16,76 +16,92 @@ namespace System.Runtime.Intrinsics.X86
public static bool IsSupported { get => IsSupported; }
/// <summary>
- /// __m128i _mm_abs_epi8 (__m128i a); PABSB xmm, xmm/m128
+ /// __m128i _mm_abs_epi8 (__m128i a);
+ /// PABSB xmm, xmm/m128
/// </summary>
public static Vector128<byte> Abs(Vector128<sbyte> value) => Abs(value);
/// <summary>
- /// __m128i _mm_abs_epi16 (__m128i a); PABSW xmm, xmm/m128
+ /// __m128i _mm_abs_epi16 (__m128i a);
+ /// PABSW xmm, xmm/m128
/// </summary>
public static Vector128<ushort> Abs(Vector128<short> value) => Abs(value);
/// <summary>
- /// __m128i _mm_abs_epi32 (__m128i a); PABSD xmm, xmm/m128
+ /// __m128i _mm_abs_epi32 (__m128i a);
+ /// PABSD xmm, xmm/m128
/// </summary>
public static Vector128<uint> Abs(Vector128<int> value) => Abs(value);
/// <summary>
- /// __m128i _mm_alignr_epi8 (__m128i a, __m128i b, int count); PALIGNR xmm, xmm/m128, imm8
+ /// __m128i _mm_alignr_epi8 (__m128i a, __m128i b, int count);
+ /// PALIGNR xmm, xmm/m128, imm8
/// </summary>
public static Vector128<sbyte> AlignRight(Vector128<sbyte> left, Vector128<sbyte> right, byte mask) => AlignRight(left, right, mask);
/// <summary>
- /// __m128i _mm_hadd_epi16 (__m128i a, __m128i b); PHADDW xmm, xmm/m128
+ /// __m128i _mm_hadd_epi16 (__m128i a, __m128i b);
+ /// PHADDW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalAdd(Vector128<short> left, Vector128<short> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m128i _mm_hadd_epi32 (__m128i a, __m128i b); PHADDD xmm, xmm/m128
+ /// __m128i _mm_hadd_epi32 (__m128i a, __m128i b);
+ /// PHADDD xmm, xmm/m128
/// </summary>
public static Vector128<int> HorizontalAdd(Vector128<int> left, Vector128<int> right) => HorizontalAdd(left, right);
/// <summary>
- /// __m128i _mm_hadds_epi16 (__m128i a, __m128i b); PHADDSW xmm, xmm/m128
+ /// __m128i _mm_hadds_epi16 (__m128i a, __m128i b);
+ /// PHADDSW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalAddSaturate(Vector128<short> left, Vector128<short> right) => HorizontalAddSaturate(left, right);
/// <summary>
- /// __m128i _mm_hsub_epi16 (__m128i a, __m128i b); PHSUBW xmm, xmm/m128
+ /// __m128i _mm_hsub_epi16 (__m128i a, __m128i b);
+ /// PHSUBW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalSubtract(Vector128<short> left, Vector128<short> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m128i _mm_hsub_epi32 (__m128i a, __m128i b); PHSUBD xmm, xmm/m128
+ /// __m128i _mm_hsub_epi32 (__m128i a, __m128i b);
+ /// PHSUBD xmm, xmm/m128
/// </summary>
public static Vector128<int> HorizontalSubtract(Vector128<int> left, Vector128<int> right) => HorizontalSubtract(left, right);
/// <summary>
- /// __m128i _mm_hsubs_epi16 (__m128i a, __m128i b); PHSUBSW xmm, xmm/m128
+ /// __m128i _mm_hsubs_epi16 (__m128i a, __m128i b);
+ /// PHSUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> HorizontalSubtractSaturate(Vector128<short> left, Vector128<short> right) => HorizontalSubtractSaturate(left, right);
/// <summary>
- /// __m128i _mm_maddubs_epi16 (__m128i a, __m128i b); PMADDUBSW xmm, xmm/m128
+ /// __m128i _mm_maddubs_epi16 (__m128i a, __m128i b);
+ /// PMADDUBSW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyAddAdjacent(Vector128<byte> left, Vector128<sbyte> right) => MultiplyAddAdjacent(left, right);
/// <summary>
- /// __m128i _mm_mulhrs_epi16 (__m128i a, __m128i b); PMULHRSW xmm, xmm/m128
+ /// __m128i _mm_mulhrs_epi16 (__m128i a, __m128i b);
+ /// PMULHRSW xmm, xmm/m128
/// </summary>
public static Vector128<short> MultiplyHighRoundScale(Vector128<short> left, Vector128<short> right) => MultiplyHighRoundScale(left, right);
/// <summary>
- /// __m128i _mm_shuffle_epi8 (__m128i a, __m128i b); PSHUFB xmm, xmm/m128
+ /// __m128i _mm_shuffle_epi8 (__m128i a, __m128i b);
+ /// PSHUFB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Shuffle(Vector128<sbyte> value, Vector128<sbyte> mask) => Shuffle(value, mask);
/// <summary>
- /// __m128i _mm_sign_epi8 (__m128i a, __m128i b); PSIGNB xmm, xmm/m128
+ /// __m128i _mm_sign_epi8 (__m128i a, __m128i b);
+ /// PSIGNB xmm, xmm/m128
/// </summary>
public static Vector128<sbyte> Sign(Vector128<sbyte> left, Vector128<sbyte> right) => Sign(left, right);
/// <summary>
- /// __m128i _mm_sign_epi16 (__m128i a, __m128i b); PSIGNW xmm, xmm/m128
+ /// __m128i _mm_sign_epi16 (__m128i a, __m128i b);
+ /// PSIGNW xmm, xmm/m128
/// </summary>
public static Vector128<short> Sign(Vector128<short> left, Vector128<short> right) => Sign(left, right);
/// <summary>
- /// __m128i _mm_sign_epi32 (__m128i a, __m128i b); PSIGND xmm, xmm/m128
+ /// __m128i _mm_sign_epi32 (__m128i a, __m128i b);
+ /// PSIGND xmm, xmm/m128
/// </summary>
public static Vector128<int> Sign(Vector128<int> left, Vector128<int> right) => Sign(left, right);
}