// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System;
using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;

namespace System.Runtime.Intrinsics.X86
{
    /// <summary>
    /// This class provides access to Intel SSE2 hardware instructions via intrinsics
    /// </summary>
    [CLSCompliant(false)]
    public abstract class Sse2 : Sse
    {
        internal Sse2() { }

        // Recursive body is intentional: the JIT recognizes this member and
        // replaces it with a constant; it is never executed as written.
        public new static bool IsSupported { get => IsSupported; }

        /// <summary>
        /// Provides access to the x86 SSE2 hardware instructions that are only
        /// available to 64-bit processes.
        /// </summary>
        public new abstract class X64 : Sse.X64
        {
            internal X64() { }

            // JIT-recognized; see note on Sse2.IsSupported.
            public new static bool IsSupported { get => IsSupported; }

            /// <summary>
            /// __int64 _mm_cvtsd_si64 (__m128d a)
            /// CVTSD2SI r64, xmm/m64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static long ConvertToInt64(Vector128<double> value) => ConvertToInt64(value);

            /// <summary>
            /// __int64 _mm_cvtsi128_si64 (__m128i a)
            /// MOVQ reg/m64, xmm
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static long ConvertToInt64(Vector128<long> value) => ConvertToInt64(value);

            /// <summary>
            /// __int64 _mm_cvtsi128_si64 (__m128i a)
            /// MOVQ reg/m64, xmm
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static ulong ConvertToUInt64(Vector128<ulong> value) => ConvertToUInt64(value);

            /// <summary>
            /// __m128d _mm_cvtsi64_sd (__m128d a, __int64 b)
            /// CVTSI2SD xmm, reg/m64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static Vector128<double> ConvertScalarToVector128Double(Vector128<double> upper, long value) => ConvertScalarToVector128Double(upper, value);

            /// <summary>
            /// __m128i _mm_cvtsi64_si128 (__int64 a)
            /// MOVQ xmm, reg/m64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static Vector128<long> ConvertScalarToVector128Int64(long value) => ConvertScalarToVector128Int64(value);

            /// <summary>
            /// __m128i _mm_cvtsi64_si128 (__int64 a)
            /// MOVQ xmm, reg/m64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static Vector128<ulong> ConvertScalarToVector128UInt64(ulong value) => ConvertScalarToVector128UInt64(value);

            /// <summary>
            /// __int64 _mm_cvttsd_si64 (__m128d a)
            /// CVTTSD2SI reg, xmm/m64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static long ConvertToInt64WithTruncation(Vector128<double> value) => ConvertToInt64WithTruncation(value);

            /// <summary>
            /// void _mm_stream_si64(__int64 *p, __int64 a)
            /// MOVNTI m64, r64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static unsafe void StoreNonTemporal(long* address, long value) => StoreNonTemporal(address, value);

            /// <summary>
            /// void _mm_stream_si64(__int64 *p, __int64 a)
            /// MOVNTI m64, r64
            /// This intrinsic is only available on 64-bit processes
            /// </summary>
            public static unsafe void StoreNonTemporal(ulong* address, ulong value) => StoreNonTemporal(address, value);
        }

        /// <summary>
        /// __m128i _mm_add_epi8 (__m128i a, __m128i b)
        /// PADDB xmm, xmm/m128
        /// </summary>
        public static Vector128<byte> Add(Vector128<byte> left, Vector128<byte> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi8 (__m128i a, __m128i b)
        /// PADDB xmm, xmm/m128
        /// </summary>
        public static Vector128<sbyte> Add(Vector128<sbyte> left, Vector128<sbyte> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi16 (__m128i a, __m128i b)
        /// PADDW xmm, xmm/m128
        /// </summary>
        public static Vector128<short> Add(Vector128<short> left, Vector128<short> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi16 (__m128i a, __m128i b)
        /// PADDW xmm, xmm/m128
        /// </summary>
        public static Vector128<ushort> Add(Vector128<ushort> left, Vector128<ushort> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi32 (__m128i a, __m128i b)
        /// PADDD xmm, xmm/m128
        /// </summary>
        public static Vector128<int> Add(Vector128<int> left, Vector128<int> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi32 (__m128i a, __m128i b)
        /// PADDD xmm, xmm/m128
        /// </summary>
        public static Vector128<uint> Add(Vector128<uint> left, Vector128<uint> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi64 (__m128i a, __m128i b)
        /// PADDQ xmm, xmm/m128
        /// </summary>
        public static Vector128<long> Add(Vector128<long> left, Vector128<long> right) => Add(left, right);

        /// <summary>
        /// __m128i _mm_add_epi64 (__m128i a, __m128i b)
        /// PADDQ xmm, xmm/m128
        /// </summary>
        public static Vector128<ulong> Add(Vector128<ulong> left, Vector128<ulong> right) => Add(left, right);

        /// <summary>
        /// __m128d _mm_add_pd (__m128d a, __m128d b)
        /// ADDPD xmm, xmm/m128
        /// </summary>
        public static Vector128<double> Add(Vector128<double> left, Vector128<double> right) => Add(left, right);

        /// <summary>
        /// __m128d _mm_add_sd (__m128d a, __m128d b)
        /// ADDSD xmm, xmm/m64
        /// </summary>
        public static Vector128<double> AddScalar(Vector128<double> left, Vector128<double> right) => AddScalar(left, right);

        /// <summary>
        /// __m128i _mm_adds_epi8 (__m128i a, __m128i b)
        /// PADDSB xmm, xmm/m128
        /// </summary>
        public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => AddSaturate(left, right);

        /// <summary>
        /// __m128i _mm_adds_epu8 (__m128i a, __m128i b)
        /// PADDUSB xmm, xmm/m128
        /// </summary>
        public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<byte> right) => AddSaturate(left, right);

        /// <summary>
        /// __m128i _mm_adds_epi16 (__m128i a, __m128i b)
        /// PADDSW xmm, xmm/m128
        /// </summary>
        public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<short> right) => AddSaturate(left, right);

        /// <summary>
        /// __m128i _mm_adds_epu16 (__m128i a, __m128i b)
        /// PADDUSW xmm, xmm/m128
        /// </summary>
        public static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<ushort> right) => AddSaturate(left, right);

        /// <summary>
        /// __m128i _mm_and_si128 (__m128i a, __m128i b)
        /// PAND xmm, xmm/m128
        /// </summary>
        public static Vector128<sbyte> And(Vector128<sbyte> left, Vector128<sbyte> right) => And(left, right);

        /// <summary>
        /// __m128i _mm_and_si128 (__m128i a, __m128i b)
        /// PAND xmm, xmm/m128
        /// </summary>
        public static Vector128<byte> And(Vector128<byte> left, Vector128<byte> right) => And(left, right);

        /// <summary>
        /// __m128i _mm_and_si128 (__m128i a, __m128i b)
        /// PAND xmm, xmm/m128
        /// </summary>
        public static Vector128<short> And(Vector128<short> left, Vector128<short> right) => And(left, right);

        /// <summary>
        /// __m128i _mm_and_si128 (__m128i a, __m128i b)
        /// PAND xmm, xmm/m128
        /// </summary>
        public static Vector128<ushort> And(Vector128<ushort> left, Vector128<ushort> right) => And(left, right);

        /// <summary>
        /// __m128i _mm_and_si128 (__m128i a, __m128i b)
        /// PAND xmm, xmm/m128
        /// </summary>
        public static Vector128<int> And(Vector128<int> left, Vector128<int> right) => And(left, right);
PAND xmm, xmm/m128 /// public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); /// /// __m128i _mm_and_si128 (__m128i a, __m128i b) /// PAND xmm, xmm/m128 /// public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); /// /// __m128i _mm_and_si128 (__m128i a, __m128i b) /// PAND xmm, xmm/m128 /// public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); /// /// __m128d _mm_and_pd (__m128d a, __m128d b) /// ANDPD xmm, xmm/m128 /// public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_andnot_si128 (__m128i a, __m128i b) /// PANDN xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128d _mm_andnot_pd 
(__m128d a, __m128d b) /// ADDNPD xmm, xmm/m128 /// public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); /// /// __m128i _mm_avg_epu8 (__m128i a, __m128i b) /// PAVGB xmm, xmm/m128 /// public static Vector128 Average(Vector128 left, Vector128 right) => Average(left, right); /// /// __m128i _mm_avg_epu16 (__m128i a, __m128i b) /// PAVGW xmm, xmm/m128 /// public static Vector128 Average(Vector128 left, Vector128 right) => Average(left, right); /// /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b) /// PCMPEQB xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b) /// PCMPEQB xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b) /// PCMPEQW xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b) /// PCMPEQW xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b) /// PCMPEQD xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128i _mm_cmpeq_epi32 (__m128i a, __m128i b) /// PCMPEQD xmm, xmm/m128 /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// __m128d _mm_cmpeq_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(0) /// public static Vector128 CompareEqual(Vector128 left, Vector128 right) => CompareEqual(left, right); /// /// int _mm_comieq_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareEqualOrderedScalar(Vector128 left, Vector128 right) => CompareEqualOrderedScalar(left, right); /// /// int 
_mm_ucomieq_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareEqualUnorderedScalar(Vector128 left, Vector128 right) => CompareEqualUnorderedScalar(left, right); /// /// __m128d _mm_cmpeq_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(0) /// public static Vector128 CompareEqualScalar(Vector128 left, Vector128 right) => CompareEqualScalar(left, right); /// /// __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b) /// PCMPGTB xmm, xmm/m128 /// public static Vector128 CompareGreaterThan(Vector128 left, Vector128 right) => CompareGreaterThan(left, right); /// /// __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b) /// PCMPGTW xmm, xmm/m128 /// public static Vector128 CompareGreaterThan(Vector128 left, Vector128 right) => CompareGreaterThan(left, right); /// /// __m128i _mm_cmpgt_epi32 (__m128i a, __m128i b) /// PCMPGTD xmm, xmm/m128 /// public static Vector128 CompareGreaterThan(Vector128 left, Vector128 right) => CompareGreaterThan(left, right); /// /// __m128d _mm_cmpgt_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(6) /// public static Vector128 CompareGreaterThan(Vector128 left, Vector128 right) => CompareGreaterThan(left, right); /// /// int _mm_comigt_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareGreaterThanOrderedScalar(Vector128 left, Vector128 right) => CompareGreaterThanOrderedScalar(left, right); /// /// int _mm_ucomigt_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareGreaterThanUnorderedScalar(Vector128 left, Vector128 right) => CompareGreaterThanUnorderedScalar(left, right); /// /// __m128d _mm_cmpgt_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(6) /// public static Vector128 CompareGreaterThanScalar(Vector128 left, Vector128 right) => CompareGreaterThanScalar(left, right); /// /// __m128d _mm_cmpge_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(5) /// public static Vector128 CompareGreaterThanOrEqual(Vector128 left, Vector128 right) => 
CompareGreaterThanOrEqual(left, right); /// /// int _mm_comige_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareGreaterThanOrEqualOrderedScalar(Vector128 left, Vector128 right) => CompareGreaterThanOrEqualOrderedScalar(left, right); /// /// int _mm_ucomige_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareGreaterThanOrEqualUnorderedScalar(Vector128 left, Vector128 right) => CompareGreaterThanOrEqualUnorderedScalar(left, right); /// /// __m128d _mm_cmpge_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(5) /// public static Vector128 CompareGreaterThanOrEqualScalar(Vector128 left, Vector128 right) => CompareGreaterThanOrEqualScalar(left, right); /// /// __m128i _mm_cmplt_epi8 (__m128i a, __m128i b) /// PCMPGTB xmm, xmm/m128 /// public static Vector128 CompareLessThan(Vector128 left, Vector128 right) => CompareLessThan(left, right); /// /// __m128i _mm_cmplt_epi16 (__m128i a, __m128i b) /// PCMPGTW xmm, xmm/m128 /// public static Vector128 CompareLessThan(Vector128 left, Vector128 right) => CompareLessThan(left, right); /// /// __m128i _mm_cmplt_epi32 (__m128i a, __m128i b) /// PCMPGTD xmm, xmm/m128 /// public static Vector128 CompareLessThan(Vector128 left, Vector128 right) => CompareLessThan(left, right); /// /// __m128d _mm_cmplt_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(1) /// public static Vector128 CompareLessThan(Vector128 left, Vector128 right) => CompareLessThan(left, right); /// /// int _mm_comilt_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareLessThanOrderedScalar(Vector128 left, Vector128 right) => CompareLessThanOrderedScalar(left, right); /// /// int _mm_ucomilt_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareLessThanUnorderedScalar(Vector128 left, Vector128 right) => CompareLessThanUnorderedScalar(left, right); /// /// __m128d _mm_cmplt_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(1) /// public 
static Vector128 CompareLessThanScalar(Vector128 left, Vector128 right) => CompareLessThanScalar(left, right); /// /// __m128d _mm_cmple_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(2) /// public static Vector128 CompareLessThanOrEqual(Vector128 left, Vector128 right) => CompareLessThanOrEqual(left, right); /// /// int _mm_comile_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareLessThanOrEqualOrderedScalar(Vector128 left, Vector128 right) => CompareLessThanOrEqualOrderedScalar(left, right); /// /// int _mm_ucomile_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareLessThanOrEqualUnorderedScalar(Vector128 left, Vector128 right) => CompareLessThanOrEqualUnorderedScalar(left, right); /// /// __m128d _mm_cmple_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(2) /// public static Vector128 CompareLessThanOrEqualScalar(Vector128 left, Vector128 right) => CompareLessThanOrEqualScalar(left, right); /// /// __m128d _mm_cmpneq_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(4) /// public static Vector128 CompareNotEqual(Vector128 left, Vector128 right) => CompareNotEqual(left, right); /// /// int _mm_comineq_sd (__m128d a, __m128d b) /// COMISS xmm, xmm/m64 /// public static bool CompareNotEqualOrderedScalar(Vector128 left, Vector128 right) => CompareNotEqualOrderedScalar(left, right); /// /// int _mm_ucomineq_sd (__m128d a, __m128d b) /// UCOMISS xmm, xmm/m64 /// public static bool CompareNotEqualUnorderedScalar(Vector128 left, Vector128 right) => CompareNotEqualUnorderedScalar(left, right); /// /// __m128d _mm_cmpneq_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(4) /// public static Vector128 CompareNotEqualScalar(Vector128 left, Vector128 right) => CompareNotEqualScalar(left, right); /// /// __m128d _mm_cmpngt_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(2) /// public static Vector128 CompareNotGreaterThan(Vector128 left, Vector128 right) => 
CompareNotGreaterThan(left, right); /// /// __m128d _mm_cmpngt_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(2) /// public static Vector128 CompareNotGreaterThanScalar(Vector128 left, Vector128 right) => CompareNotGreaterThanScalar(left, right); /// /// __m128d _mm_cmpnge_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(1) /// public static Vector128 CompareNotGreaterThanOrEqual(Vector128 left, Vector128 right) => CompareNotGreaterThanOrEqual(left, right); /// /// __m128d _mm_cmpnge_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(1) /// public static Vector128 CompareNotGreaterThanOrEqualScalar(Vector128 left, Vector128 right) => CompareNotGreaterThanOrEqualScalar(left, right); /// /// __m128d _mm_cmpnlt_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(5) /// public static Vector128 CompareNotLessThan(Vector128 left, Vector128 right) => CompareNotLessThan(left, right); /// /// __m128d _mm_cmpnlt_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(5) /// public static Vector128 CompareNotLessThanScalar(Vector128 left, Vector128 right) => CompareNotLessThanScalar(left, right); /// /// __m128d _mm_cmpnle_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(6) /// public static Vector128 CompareNotLessThanOrEqual(Vector128 left, Vector128 right) => CompareNotLessThanOrEqual(left, right); /// /// __m128d _mm_cmpnle_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(6) /// public static Vector128 CompareNotLessThanOrEqualScalar(Vector128 left, Vector128 right) => CompareNotLessThanOrEqualScalar(left, right); /// /// __m128d _mm_cmpord_pd (__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(7) /// public static Vector128 CompareOrdered(Vector128 left, Vector128 right) => CompareOrdered(left, right); /// /// __m128d _mm_cmpord_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(7) /// public static Vector128 CompareOrderedScalar(Vector128 left, Vector128 right) => CompareOrderedScalar(left, right); /// /// __m128d _mm_cmpunord_pd 
(__m128d a, __m128d b) /// CMPPD xmm, xmm/m128, imm8(3) /// public static Vector128 CompareUnordered(Vector128 left, Vector128 right) => CompareUnordered(left, right); /// /// __m128d _mm_cmpunord_sd (__m128d a, __m128d b) /// CMPSD xmm, xmm/m64, imm8(3) /// public static Vector128 CompareUnorderedScalar(Vector128 left, Vector128 right) => CompareUnorderedScalar(left, right); /// /// __m128i _mm_cvtps_epi32 (__m128 a) /// CVTPS2DQ xmm, xmm/m128 /// public static Vector128 ConvertToVector128Int32(Vector128 value) => ConvertToVector128Int32(value); /// /// __m128i _mm_cvtpd_epi32 (__m128d a) /// CVTPD2DQ xmm, xmm/m128 /// public static Vector128 ConvertToVector128Int32(Vector128 value) => ConvertToVector128Int32(value); /// /// __m128 _mm_cvtepi32_ps (__m128i a) /// CVTDQ2PS xmm, xmm/m128 /// public static Vector128 ConvertToVector128Single(Vector128 value) => ConvertToVector128Single(value); /// /// __m128 _mm_cvtpd_ps (__m128d a) /// CVTPD2PS xmm, xmm/m128 /// public static Vector128 ConvertToVector128Single(Vector128 value) => ConvertToVector128Single(value); /// /// __m128d _mm_cvtepi32_pd (__m128i a) /// CVTDQ2PD xmm, xmm/m128 /// public static Vector128 ConvertToVector128Double(Vector128 value) => ConvertToVector128Double(value); /// /// __m128d _mm_cvtps_pd (__m128 a) /// CVTPS2PD xmm, xmm/m128 /// public static Vector128 ConvertToVector128Double(Vector128 value) => ConvertToVector128Double(value); /// /// int _mm_cvtsd_si32 (__m128d a) /// CVTSD2SI r32, xmm/m64 /// public static int ConvertToInt32(Vector128 value) => ConvertToInt32(value); /// /// int _mm_cvtsi128_si32 (__m128i a) /// MOVD reg/m32, xmm /// public static int ConvertToInt32(Vector128 value) => ConvertToInt32(value); /// /// int _mm_cvtsi128_si32 (__m128i a) /// MOVD reg/m32, xmm /// public static uint ConvertToUInt32(Vector128 value) => ConvertToUInt32(value); /// /// __m128d _mm_cvtsi32_sd (__m128d a, int b) /// CVTSI2SD xmm, reg/m32 /// public static Vector128 
ConvertScalarToVector128Double(Vector128 upper, int value) => ConvertScalarToVector128Double(upper, value); /// /// __m128d _mm_cvtss_sd (__m128d a, __m128 b) /// CVTSS2SD xmm, xmm/m32 /// public static Vector128 ConvertScalarToVector128Double(Vector128 upper, Vector128 value) => ConvertScalarToVector128Double(upper, value); /// /// __m128i _mm_cvtsi32_si128 (int a) /// MOVD xmm, reg/m32 /// public static Vector128 ConvertScalarToVector128Int32(int value) => ConvertScalarToVector128Int32(value); /// /// __m128 _mm_cvtsd_ss (__m128 a, __m128d b) /// CVTSD2SS xmm, xmm/m64 /// public static Vector128 ConvertScalarToVector128Single(Vector128 upper, Vector128 value) => ConvertScalarToVector128Single(upper, value); /// /// __m128i _mm_cvtsi32_si128 (int a) /// MOVD xmm, reg/m32 /// public static Vector128 ConvertScalarToVector128UInt32(uint value) => ConvertScalarToVector128UInt32(value); /// /// __m128i _mm_cvttps_epi32 (__m128 a) /// CVTTPS2DQ xmm, xmm/m128 /// public static Vector128 ConvertToVector128Int32WithTruncation(Vector128 value) => ConvertToVector128Int32WithTruncation(value); /// /// __m128i _mm_cvttpd_epi32 (__m128d a) /// CVTTPD2DQ xmm, xmm/m128 /// public static Vector128 ConvertToVector128Int32WithTruncation(Vector128 value) => ConvertToVector128Int32WithTruncation(value); /// /// int _mm_cvttsd_si32 (__m128d a) /// CVTTSD2SI reg, xmm/m64 /// public static int ConvertToInt32WithTruncation(Vector128 value) => ConvertToInt32WithTruncation(value); /// /// __m128d _mm_div_pd (__m128d a, __m128d b) /// DIVPD xmm, xmm/m128 /// public static Vector128 Divide(Vector128 left, Vector128 right) => Divide(left, right); /// /// __m128d _mm_div_sd (__m128d a, __m128d b) /// DIVSD xmm, xmm/m64 /// public static Vector128 DivideScalar(Vector128 left, Vector128 right) => DivideScalar(left, right); /// /// int _mm_extract_epi16 (__m128i a, int immediate) /// PEXTRW reg, xmm, imm8 /// public static ushort Extract(Vector128 value, byte index) => Extract(value, index); /// 
/// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate) /// PINSRW xmm, reg/m16, imm8 /// public static Vector128 Insert(Vector128 value, short data, byte index) => Insert(value, data, index); /// /// __m128i _mm_insert_epi16 (__m128i a, int i, int immediate) /// PINSRW xmm, reg/m16, imm8 /// public static Vector128 Insert(Vector128 value, ushort data, byte index) => Insert(value, data, index); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(sbyte* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(byte* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(short* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(ushort* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(int* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(uint* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(long* address) => LoadVector128(address); /// /// __m128i _mm_loadu_si128 (__m128i const* mem_address) /// MOVDQU xmm, m128 /// public static unsafe Vector128 LoadVector128(ulong* address) => LoadVector128(address); /// /// __m128d _mm_loadu_pd (double const* mem_address) /// MOVUPD xmm, m128 /// public static unsafe Vector128 LoadVector128(double* address) => LoadVector128(address); /// /// __m128d _mm_load_sd (double 
const* mem_address) /// MOVSD xmm, m64 /// public static unsafe Vector128 LoadScalarVector128(double* address) => LoadScalarVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(sbyte* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(byte* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(short* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(ushort* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(int* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(uint* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(long* address) => LoadAlignedVector128(address); /// /// __m128i _mm_load_si128 (__m128i const* mem_address) /// MOVDQA xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(ulong* address) => LoadAlignedVector128(address); /// /// __m128d _mm_load_pd (double const* mem_address) /// MOVAPD xmm, m128 /// public static unsafe Vector128 LoadAlignedVector128(double* address) => LoadAlignedVector128(address); /// /// void _mm_lfence(void) /// LFENCE /// public static void LoadFence() => LoadFence(); /// /// __m128d _mm_loadh_pd (__m128d a, double const* mem_addr) /// MOVHPD xmm, m64 
/// public static unsafe Vector128 LoadHigh(Vector128 lower, double* address) => LoadHigh(lower, address); /// /// __m128d _mm_loadl_pd (__m128d a, double const* mem_addr) /// MOVLPD xmm, m64 /// public static unsafe Vector128 LoadLow(Vector128 upper, double* address) => LoadLow(upper, address); /// /// __m128i _mm_loadl_epi32 (__m128i const* mem_addr) /// MOVD xmm, reg/m32 /// The above native signature does not exist. We provide this additional overload for completeness. /// public static unsafe Vector128 LoadScalarVector128(int* address) => LoadScalarVector128(address); /// /// __m128i _mm_loadl_epi32 (__m128i const* mem_addr) /// MOVD xmm, reg/m32 /// The above native signature does not exist. We provide this additional overload for completeness. /// public static unsafe Vector128 LoadScalarVector128(uint* address) => LoadScalarVector128(address); /// /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr) /// MOVQ xmm, reg/m64 /// public static unsafe Vector128 LoadScalarVector128(long* address) => LoadScalarVector128(address); /// /// __m128i _mm_loadl_epi64 (__m128i const* mem_addr) /// MOVQ xmm, reg/m64 /// public static unsafe Vector128 LoadScalarVector128(ulong* address) => LoadScalarVector128(address); /// /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address) /// MASKMOVDQU xmm, xmm /// public static unsafe void MaskMove(Vector128 source, Vector128 mask, sbyte* address) => MaskMove(source, mask, address); /// /// void _mm_maskmoveu_si128 (__m128i a, __m128i mask, char* mem_address) /// MASKMOVDQU xmm, xmm /// public static unsafe void MaskMove(Vector128 source, Vector128 mask, byte* address) => MaskMove(source, mask, address); /// /// __m128i _mm_max_epu8 (__m128i a, __m128i b) /// PMAXUB xmm, xmm/m128 /// public static Vector128 Max(Vector128 left, Vector128 right) => Max(left, right); /// /// __m128i _mm_max_epi16 (__m128i a, __m128i b) /// PMAXSW xmm, xmm/m128 /// public static Vector128 Max(Vector128 left, Vector128 right) => 
Max(left, right); /// /// __m128d _mm_max_pd (__m128d a, __m128d b) /// MAXPD xmm, xmm/m128 /// public static Vector128 Max(Vector128 left, Vector128 right) => Max(left, right); /// /// __m128d _mm_max_sd (__m128d a, __m128d b) /// MAXSD xmm, xmm/m64 /// public static Vector128 MaxScalar(Vector128 left, Vector128 right) => MaxScalar(left, right); /// /// void _mm_mfence(void) /// MFENCE /// public static void MemoryFence() => MemoryFence(); /// /// __m128i _mm_min_epu8 (__m128i a, __m128i b) /// PMINUB xmm, xmm/m128 /// public static Vector128 Min(Vector128 left, Vector128 right) => Min(left, right); /// /// __m128i _mm_min_epi16 (__m128i a, __m128i b) /// PMINSW xmm, xmm/m128 /// public static Vector128 Min(Vector128 left, Vector128 right) => Min(left, right); /// /// __m128d _mm_min_pd (__m128d a, __m128d b) /// MINPD xmm, xmm/m128 /// public static Vector128 Min(Vector128 left, Vector128 right) => Min(left, right); /// /// __m128d _mm_min_sd (__m128d a, __m128d b) /// MINSD xmm, xmm/m64 /// public static Vector128 MinScalar(Vector128 left, Vector128 right) => MinScalar(left, right); /// /// __m128d _mm_move_sd (__m128d a, __m128d b) /// MOVSD xmm, xmm /// public static Vector128 MoveScalar(Vector128 upper, Vector128 value) => MoveScalar(upper, value); /// /// int _mm_movemask_epi8 (__m128i a) /// PMOVMSKB reg, xmm /// public static int MoveMask(Vector128 value) => MoveMask(value); /// /// int _mm_movemask_epi8 (__m128i a) /// PMOVMSKB reg, xmm /// public static int MoveMask(Vector128 value) => MoveMask(value); /// /// int _mm_movemask_pd (__m128d a) /// MOVMSKPD reg, xmm /// public static int MoveMask(Vector128 value) => MoveMask(value); /// /// __m128i _mm_move_epi64 (__m128i a) /// MOVQ xmm, xmm /// public static Vector128 MoveScalar(Vector128 value) => MoveScalar(value); /// /// __m128i _mm_move_epi64 (__m128i a) /// MOVQ xmm, xmm /// public static Vector128 MoveScalar(Vector128 value) => MoveScalar(value); /// /// __m128i _mm_mul_epu32 (__m128i a, __m128i b) 
/// PMULUDQ xmm, xmm/m128 /// public static Vector128 Multiply(Vector128 left, Vector128 right) => Multiply(left, right); /// /// __m128d _mm_mul_pd (__m128d a, __m128d b) /// MULPD xmm, xmm/m128 /// public static Vector128 Multiply(Vector128 left, Vector128 right) => Multiply(left, right); /// /// __m128d _mm_mul_sd (__m128d a, __m128d b) /// MULSD xmm, xmm/m64 /// public static Vector128 MultiplyScalar(Vector128 left, Vector128 right) => MultiplyScalar(left, right); /// /// __m128i _mm_mulhi_epi16 (__m128i a, __m128i b) /// PMULHW xmm, xmm/m128 /// public static Vector128 MultiplyHigh(Vector128 left, Vector128 right) => MultiplyHigh(left, right); /// /// __m128i _mm_mulhi_epu16 (__m128i a, __m128i b) /// PMULHUW xmm, xmm/m128 /// public static Vector128 MultiplyHigh(Vector128 left, Vector128 right) => MultiplyHigh(left, right); /// /// __m128i _mm_madd_epi16 (__m128i a, __m128i b) /// PMADDWD xmm, xmm/m128 /// public static Vector128 MultiplyAddAdjacent(Vector128 left, Vector128 right) => MultiplyAddAdjacent(left, right); /// /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b) /// PMULLW xmm, xmm/m128 /// public static Vector128 MultiplyLow(Vector128 left, Vector128 right) => MultiplyLow(left, right); /// /// __m128i _mm_mullo_epi16 (__m128i a, __m128i b) /// PMULLW xmm, xmm/m128 /// public static Vector128 MultiplyLow(Vector128 left, Vector128 right) => MultiplyLow(left, right); /// /// __m128i _mm_or_si128 (__m128i a, __m128i b) /// POR xmm, xmm/m128 /// public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); /// /// __m128i _mm_or_si128 (__m128i a, __m128i b) /// POR xmm, xmm/m128 /// public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); /// /// __m128i _mm_or_si128 (__m128i a, __m128i b) /// POR xmm, xmm/m128 /// public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); /// /// __m128i _mm_or_si128 (__m128i a, __m128i b) /// POR xmm, xmm/m128 /// public static Vector128 Or(Vector128 left, 
Vector128 right) => Or(left, right);

        // NOTE(review): the generic type arguments on Vector128<T> below were reconstructed from the
        // documented native intrinsics (e.g. _mm_or_si128 element widths, PACKSSWB operand types) and
        // from the surviving pointer types in the Store* overloads — TODO confirm against the canonical
        // Sse2.cs in dotnet/corefx. The recursive expression bodies are the intentional intrinsic-stub
        // pattern: the JIT replaces each call with the hardware instruction; do not "fix" the recursion.

        /// <summary>__m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128</summary>
        public static Vector128<int> Or(Vector128<int> left, Vector128<int> right) => Or(left, right);
        /// <summary>__m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128</summary>
        public static Vector128<uint> Or(Vector128<uint> left, Vector128<uint> right) => Or(left, right);
        /// <summary>__m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128</summary>
        public static Vector128<long> Or(Vector128<long> left, Vector128<long> right) => Or(left, right);
        /// <summary>__m128i _mm_or_si128 (__m128i a, __m128i b); POR xmm, xmm/m128</summary>
        public static Vector128<ulong> Or(Vector128<ulong> left, Vector128<ulong> right) => Or(left, right);
        /// <summary>__m128d _mm_or_pd (__m128d a, __m128d b); ORPD xmm, xmm/m128</summary>
        public static Vector128<double> Or(Vector128<double> left, Vector128<double> right) => Or(left, right);

        /// <summary>__m128i _mm_packs_epi16 (__m128i a, __m128i b); PACKSSWB xmm, xmm/m128</summary>
        public static Vector128<sbyte> PackSignedSaturate(Vector128<short> left, Vector128<short> right) => PackSignedSaturate(left, right);
        /// <summary>__m128i _mm_packs_epi32 (__m128i a, __m128i b); PACKSSDW xmm, xmm/m128</summary>
        public static Vector128<short> PackSignedSaturate(Vector128<int> left, Vector128<int> right) => PackSignedSaturate(left, right);
        /// <summary>__m128i _mm_packus_epi16 (__m128i a, __m128i b); PACKUSWB xmm, xmm/m128</summary>
        public static Vector128<byte> PackUnsignedSaturate(Vector128<short> left, Vector128<short> right) => PackUnsignedSaturate(left, right);

        /// <summary>__m128i _mm_sad_epu8 (__m128i a, __m128i b); PSADBW xmm, xmm/m128</summary>
        public static Vector128<ushort> SumAbsoluteDifferences(Vector128<byte> left, Vector128<byte> right) => SumAbsoluteDifferences(left, right);

        /// <summary>__m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8</summary>
        public static Vector128<int> Shuffle(Vector128<int> value, byte control) => Shuffle(value, control);
        /// <summary>__m128i _mm_shuffle_epi32 (__m128i a, int immediate); PSHUFD xmm, xmm/m128, imm8</summary>
        public static Vector128<uint> Shuffle(Vector128<uint> value, byte control) => Shuffle(value, control);
        /// <summary>__m128d _mm_shuffle_pd (__m128d a, __m128d b, int immediate); SHUFPD xmm, xmm/m128, imm8</summary>
        public static Vector128<double> Shuffle(Vector128<double> left, Vector128<double> right, byte control) => Shuffle(left, right, control);
        /// <summary>__m128i _mm_shufflehi_epi16 (__m128i a, int immediate); PSHUFHW xmm, xmm/m128, imm8</summary>
        public static Vector128<short> ShuffleHigh(Vector128<short> value, byte control) => ShuffleHigh(value, control);
        /// <summary>__m128i _mm_shufflehi_epi16 (__m128i a, int control); PSHUFHW xmm, xmm/m128, imm8</summary>
        public static Vector128<ushort> ShuffleHigh(Vector128<ushort> value, byte control) => ShuffleHigh(value, control);
        /// <summary>__m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8</summary>
        public static Vector128<short> ShuffleLow(Vector128<short> value, byte control) => ShuffleLow(value, control);
        /// <summary>__m128i _mm_shufflelo_epi16 (__m128i a, int control); PSHUFLW xmm, xmm/m128, imm8</summary>
        public static Vector128<ushort> ShuffleLow(Vector128<ushort> value, byte control) => ShuffleLow(value, control);

        /// <summary>__m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128</summary>
        public static Vector128<short> ShiftLeftLogical(Vector128<short> value, Vector128<short> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_sll_epi16 (__m128i a, __m128i count); PSLLW xmm, xmm/m128</summary>
        public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, Vector128<ushort> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128</summary>
        public static Vector128<int> ShiftLeftLogical(Vector128<int> value, Vector128<int> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_sll_epi32 (__m128i a, __m128i count); PSLLD xmm, xmm/m128</summary>
        public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, Vector128<uint> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128</summary>
        public static Vector128<long> ShiftLeftLogical(Vector128<long> value, Vector128<long> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_sll_epi64 (__m128i a, __m128i count); PSLLQ xmm, xmm/m128</summary>
        public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, Vector128<ulong> count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8</summary>
        public static Vector128<short> ShiftLeftLogical(Vector128<short> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi16 (__m128i a, int immediate); PSLLW xmm, imm8</summary>
        public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8</summary>
        public static Vector128<int> ShiftLeftLogical(Vector128<int> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi32 (__m128i a, int immediate); PSLLD xmm, imm8</summary>
        public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8</summary>
        public static Vector128<long> ShiftLeftLogical(Vector128<long> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>__m128i _mm_slli_epi64 (__m128i a, int immediate); PSLLQ xmm, imm8</summary>
        public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, byte count) => ShiftLeftLogical(value, count);

        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<sbyte> ShiftLeftLogical128BitLane(Vector128<sbyte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<byte> ShiftLeftLogical128BitLane(Vector128<byte> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<short> ShiftLeftLogical128BitLane(Vector128<short> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<ushort> ShiftLeftLogical128BitLane(Vector128<ushort> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<int> ShiftLeftLogical128BitLane(Vector128<int> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<uint> ShiftLeftLogical128BitLane(Vector128<uint> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<long> ShiftLeftLogical128BitLane(Vector128<long> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bslli_si128 (__m128i a, int imm8); PSLLDQ xmm, imm8</summary>
        public static Vector128<ulong> ShiftLeftLogical128BitLane(Vector128<ulong> value, byte numBytes) => ShiftLeftLogical128BitLane(value, numBytes);

        /// <summary>__m128i _mm_sra_epi16 (__m128i a, __m128i count); PSRAW xmm, xmm/m128</summary>
        public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, Vector128<short> count) => ShiftRightArithmetic(value, count);
        /// <summary>__m128i _mm_sra_epi32 (__m128i a, __m128i count); PSRAD xmm, xmm/m128</summary>
        public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, Vector128<int> count) => ShiftRightArithmetic(value, count);
        /// <summary>__m128i _mm_srai_epi16 (__m128i a, int immediate); PSRAW xmm, imm8</summary>
        public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>__m128i _mm_srai_epi32 (__m128i a, int immediate); PSRAD xmm, imm8</summary>
        public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, byte count) => ShiftRightArithmetic(value, count);

        /// <summary>__m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128</summary>
        public static Vector128<short> ShiftRightLogical(Vector128<short> value, Vector128<short> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srl_epi16 (__m128i a, __m128i count); PSRLW xmm, xmm/m128</summary>
        public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, Vector128<ushort> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128</summary>
        public static Vector128<int> ShiftRightLogical(Vector128<int> value, Vector128<int> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srl_epi32 (__m128i a, __m128i count); PSRLD xmm, xmm/m128</summary>
        public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, Vector128<uint> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128</summary>
        public static Vector128<long> ShiftRightLogical(Vector128<long> value, Vector128<long> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srl_epi64 (__m128i a, __m128i count); PSRLQ xmm, xmm/m128</summary>
        public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, Vector128<ulong> count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8</summary>
        public static Vector128<short> ShiftRightLogical(Vector128<short> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi16 (__m128i a, int immediate); PSRLW xmm, imm8</summary>
        public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8</summary>
        public static Vector128<int> ShiftRightLogical(Vector128<int> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi32 (__m128i a, int immediate); PSRLD xmm, imm8</summary>
        public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8</summary>
        public static Vector128<long> ShiftRightLogical(Vector128<long> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>__m128i _mm_srli_epi64 (__m128i a, int immediate); PSRLQ xmm, imm8</summary>
        public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<sbyte> ShiftRightLogical128BitLane(Vector128<sbyte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<byte> ShiftRightLogical128BitLane(Vector128<byte> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<short> ShiftRightLogical128BitLane(Vector128<short> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<ushort> ShiftRightLogical128BitLane(Vector128<ushort> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<int> ShiftRightLogical128BitLane(Vector128<int> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<uint> ShiftRightLogical128BitLane(Vector128<uint> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<long> ShiftRightLogical128BitLane(Vector128<long> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);
        /// <summary>__m128i _mm_bsrli_si128 (__m128i a, int imm8); PSRLDQ xmm, imm8</summary>
        public static Vector128<ulong> ShiftRightLogical128BitLane(Vector128<ulong> value, byte numBytes) => ShiftRightLogical128BitLane(value, numBytes);

        /// <summary>__m128d _mm_sqrt_pd (__m128d a); SQRTPD xmm, xmm/m128</summary>
        public static Vector128<double> Sqrt(Vector128<double> value) => Sqrt(value);
        /// <summary>
        /// __m128d _mm_sqrt_sd (__m128d a); SQRTSD xmm, xmm/64
        /// The above native signature does not exist. We provide this additional overload for the recommended use case of this intrinsic.
        /// </summary>
        public static Vector128<double> SqrtScalar(Vector128<double> value) => SqrtScalar(value);
        /// <summary>__m128d _mm_sqrt_sd (__m128d a, __m128d b); SQRTSD xmm, xmm/64</summary>
        public static Vector128<double> SqrtScalar(Vector128<double> upper, Vector128<double> value) => SqrtScalar(upper, value);

        /// <summary>void _mm_store_sd (double* mem_addr, __m128d a); MOVSD m64, xmm</summary>
        public static unsafe void StoreScalar(double* address, Vector128<double> source) => StoreScalar(address, source);

        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(sbyte* address, Vector128<sbyte> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(byte* address, Vector128<byte> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(short* address, Vector128<short> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(ushort* address, Vector128<ushort> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(int* address, Vector128<int> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(uint* address, Vector128<uint> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(long* address, Vector128<long> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_si128 (__m128i* mem_addr, __m128i a); MOVDQA m128, xmm</summary>
        public static unsafe void StoreAligned(ulong* address, Vector128<ulong> source) => StoreAligned(address, source);
        /// <summary>void _mm_store_pd (double* mem_addr, __m128d a); MOVAPD m128, xmm</summary>
        public static unsafe void StoreAligned(double* address, Vector128<double> source) => StoreAligned(address, source);

        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(sbyte* address, Vector128<sbyte> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(byte* address, Vector128<byte> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(short* address, Vector128<short> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(ushort* address, Vector128<ushort> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(int* address, Vector128<int> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(uint* address, Vector128<uint> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(long* address, Vector128<long> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_si128 (__m128i* mem_addr, __m128i a); MOVNTDQ m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(ulong* address, Vector128<ulong> source) => StoreAlignedNonTemporal(address, source);
        /// <summary>void _mm_stream_pd (double* mem_addr, __m128d a); MOVNTPD m128, xmm</summary>
        public static unsafe void StoreAlignedNonTemporal(double* address, Vector128<double> source) => StoreAlignedNonTemporal(address, source);

        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(sbyte* address, Vector128<sbyte> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(byte* address, Vector128<byte> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(short* address, Vector128<short> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(ushort* address, Vector128<ushort> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(int* address, Vector128<int> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(uint* address, Vector128<uint> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(long* address, Vector128<long> source) => Store(address, source);
        /// <summary>void _mm_storeu_si128 (__m128i* mem_addr, __m128i a); MOVDQU m128, xmm</summary>
        public static unsafe void Store(ulong* address, Vector128<ulong> source) => Store(address, source);
        /// <summary>void _mm_storeu_pd (double* mem_addr, __m128d a); MOVUPD m128, xmm</summary>
        public static unsafe void Store(double* address, Vector128<double> source) => Store(address, source);

        /// <summary>void _mm_storeh_pd (double* mem_addr, __m128d a); MOVHPD m64, xmm</summary>
        public static unsafe void StoreHigh(double* address, Vector128<double> source) => StoreHigh(address, source);
        /// <summary>void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm</summary>
        public static unsafe void StoreLow(long* address, Vector128<long> source) => StoreLow(address, source);
        /// <summary>void _mm_storel_epi64 (__m128i* mem_addr, __m128i a); MOVQ m64, xmm</summary>
        public static unsafe void StoreLow(ulong* address, Vector128<ulong> source) => StoreLow(address, source);
        /// <summary>void _mm_storel_pd (double* mem_addr, __m128d a); MOVLPD m64, xmm</summary>
        public static unsafe void StoreLow(double* address, Vector128<double> source) => StoreLow(address, source);
        /// <summary>void _mm_stream_si32 (int* p, int a); MOVNTI m32, r32</summary>
        public static unsafe void StoreNonTemporal(int* address, int value) => StoreNonTemporal(address, value);
        /// <summary>void _mm_stream_si32 (int* p, int a); MOVNTI m32, r32</summary>
        public static unsafe void StoreNonTemporal(uint* address, uint value) => StoreNonTemporal(address, value);

        /// <summary>__m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128</summary>
        public static Vector128<sbyte> Subtract(Vector128<sbyte> left, Vector128<sbyte> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi8 (__m128i a, __m128i b); PSUBB xmm, xmm/m128</summary>
        public static Vector128<byte> Subtract(Vector128<byte> left, Vector128<byte> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128</summary>
        public static Vector128<short> Subtract(Vector128<short> left, Vector128<short> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi16 (__m128i a, __m128i b); PSUBW xmm, xmm/m128</summary>
        public static Vector128<ushort> Subtract(Vector128<ushort> left, Vector128<ushort> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128</summary>
        public static Vector128<int> Subtract(Vector128<int> left, Vector128<int> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi32 (__m128i a, __m128i b); PSUBD xmm, xmm/m128</summary>
        public static Vector128<uint> Subtract(Vector128<uint> left, Vector128<uint> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128</summary>
        public static Vector128<long> Subtract(Vector128<long> left, Vector128<long> right) => Subtract(left, right);
        /// <summary>__m128i _mm_sub_epi64 (__m128i a, __m128i b); PSUBQ xmm, xmm/m128</summary>
        public static Vector128<ulong> Subtract(Vector128<ulong> left, Vector128<ulong> right) => Subtract(left, right);
        /// <summary>__m128d _mm_sub_pd (__m128d a, __m128d b); SUBPD xmm, xmm/m128</summary>
        public static Vector128<double> Subtract(Vector128<double> left, Vector128<double> right) => Subtract(left, right);
        /// <summary>__m128d _mm_sub_sd (__m128d a, __m128d b); SUBSD xmm, xmm/m64</summary>
        public static Vector128<double> SubtractScalar(Vector128<double> left, Vector128<double> right) => SubtractScalar(left, right);

        /// <summary>__m128i _mm_subs_epi8 (__m128i a, __m128i b); PSUBSB xmm, xmm/m128</summary>
        public static Vector128<sbyte> SubtractSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractSaturate(left, right);
        /// <summary>__m128i _mm_subs_epi16 (__m128i a, __m128i b); PSUBSW xmm, xmm/m128</summary>
        public static Vector128<short> SubtractSaturate(Vector128<short> left, Vector128<short> right) => SubtractSaturate(left, right);
        /// <summary>__m128i _mm_subs_epu8 (__m128i a, __m128i b); PSUBUSB xmm, xmm/m128</summary>
        public static Vector128<byte> SubtractSaturate(Vector128<byte> left, Vector128<byte> right) => SubtractSaturate(left, right);
        /// <summary>__m128i _mm_subs_epu16 (__m128i a, __m128i b); PSUBUSW xmm, xmm/m128</summary>
        public static Vector128<ushort> SubtractSaturate(Vector128<ushort> left, Vector128<ushort> right) => SubtractSaturate(left, right);

        /// <summary>__m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128</summary>
        public static Vector128<sbyte> UnpackHigh(Vector128<sbyte> left, Vector128<sbyte> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi8 (__m128i a, __m128i b); PUNPCKHBW xmm, xmm/m128</summary>
        public static Vector128<byte> UnpackHigh(Vector128<byte> left, Vector128<byte> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128</summary>
        public static Vector128<short> UnpackHigh(Vector128<short> left, Vector128<short> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi16 (__m128i a, __m128i b); PUNPCKHWD xmm, xmm/m128</summary>
        public static Vector128<ushort> UnpackHigh(Vector128<ushort> left, Vector128<ushort> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128</summary>
        public static Vector128<int> UnpackHigh(Vector128<int> left, Vector128<int> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi32 (__m128i a, __m128i b); PUNPCKHDQ xmm, xmm/m128</summary>
        public static Vector128<uint> UnpackHigh(Vector128<uint> left, Vector128<uint> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128</summary>
        public static Vector128<long> UnpackHigh(Vector128<long> left, Vector128<long> right) => UnpackHigh(left, right);
        /// <summary>__m128i _mm_unpackhi_epi64 (__m128i a, __m128i b); PUNPCKHQDQ xmm, xmm/m128</summary>
        public static Vector128<ulong> UnpackHigh(Vector128<ulong> left, Vector128<ulong> right) => UnpackHigh(left, right);
        /// <summary>__m128d _mm_unpackhi_pd (__m128d a, __m128d b); UNPCKHPD xmm, xmm/m128</summary>
        public static Vector128<double> UnpackHigh(Vector128<double> left, Vector128<double> right) => UnpackHigh(left, right);

        /// <summary>__m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128</summary>
        public static Vector128<sbyte> UnpackLow(Vector128<sbyte> left, Vector128<sbyte> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi8 (__m128i a, __m128i b); PUNPCKLBW xmm, xmm/m128</summary>
        public static Vector128<byte> UnpackLow(Vector128<byte> left, Vector128<byte> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128</summary>
        public static Vector128<short> UnpackLow(Vector128<short> left, Vector128<short> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi16 (__m128i a, __m128i b); PUNPCKLWD xmm, xmm/m128</summary>
        public static Vector128<ushort> UnpackLow(Vector128<ushort> left, Vector128<ushort> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128</summary>
        public static Vector128<int> UnpackLow(Vector128<int> left, Vector128<int> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi32 (__m128i a, __m128i b); PUNPCKLDQ xmm, xmm/m128</summary>
        public static Vector128<uint> UnpackLow(Vector128<uint> left, Vector128<uint> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128</summary>
        public static Vector128<long> UnpackLow(Vector128<long> left, Vector128<long> right) => UnpackLow(left, right);
        /// <summary>__m128i _mm_unpacklo_epi64 (__m128i a, __m128i b); PUNPCKLQDQ xmm, xmm/m128</summary>
        public static Vector128<ulong> UnpackLow(Vector128<ulong> left, Vector128<ulong> right) => UnpackLow(left, right);
        /// <summary>__m128d _mm_unpacklo_pd (__m128d a, __m128d b); UNPCKLPD xmm, xmm/m128</summary>
        public static Vector128<double> UnpackLow(Vector128<double> left, Vector128<double> right) => UnpackLow(left, right);

        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<sbyte> Xor(Vector128<sbyte> left, Vector128<sbyte> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<byte> Xor(Vector128<byte> left, Vector128<byte> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<short> Xor(Vector128<short> left, Vector128<short> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<ushort> Xor(Vector128<ushort> left, Vector128<ushort> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<int> Xor(Vector128<int> left, Vector128<int> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<uint> Xor(Vector128<uint> left, Vector128<uint> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<long> Xor(Vector128<long> left, Vector128<long> right) => Xor(left, right);
        /// <summary>__m128i _mm_xor_si128 (__m128i a, __m128i b); PXOR xmm, xmm/m128</summary>
        public static Vector128<ulong> Xor(Vector128<ulong> left, Vector128<ulong> right) => Xor(left, right);
        /// <summary>__m128d _mm_xor_pd (__m128d a, __m128d b); XORPD xmm, xmm/m128</summary>
        public static Vector128<double> Xor(Vector128<double> left, Vector128<double> right) => Xor(left, right);
    }
}