// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Runtime.Intrinsics;
namespace System.Runtime.Intrinsics.X86
{
/// <summary>
/// This class provides access to Intel AVX2 hardware instructions via intrinsics
/// </summary>
[CLSCompliant(false)]
public abstract class Avx2 : Avx
{
internal Avx2() { }
public new static bool IsSupported { get => IsSupported; }
///
/// __m256i _mm256_abs_epi8 (__m256i a)
/// VPABSB ymm, ymm/m256
///
public static Vector256 Abs(Vector256 value) => Abs(value);
///
/// __m256i _mm256_abs_epi16 (__m256i a)
/// VPABSW ymm, ymm/m256
///
public static Vector256 Abs(Vector256 value) => Abs(value);
///
/// __m256i _mm256_abs_epi32 (__m256i a)
/// VPABSD ymm, ymm/m256
///
public static Vector256 Abs(Vector256 value) => Abs(value);
///
/// __m256i _mm256_add_epi8 (__m256i a, __m256i b)
/// VPADDB ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi8 (__m256i a, __m256i b)
/// VPADDB ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi16 (__m256i a, __m256i b)
/// VPADDW ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi16 (__m256i a, __m256i b)
/// VPADDW ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi32 (__m256i a, __m256i b)
/// VPADDD ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi32 (__m256i a, __m256i b)
/// VPADDD ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi64 (__m256i a, __m256i b)
/// VPADDQ ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_add_epi64 (__m256i a, __m256i b)
/// VPADDQ ymm, ymm, ymm/m256
///
public static Vector256 Add(Vector256 left, Vector256 right) => Add(left, right);
///
/// __m256i _mm256_adds_epi8 (__m256i a, __m256i b)
/// VPADDSB ymm, ymm, ymm/m256
///
public static Vector256 AddSaturate(Vector256 left, Vector256 right) => AddSaturate(left, right);
///
/// __m256i _mm256_adds_epu8 (__m256i a, __m256i b)
/// VPADDUSB ymm, ymm, ymm/m256
///
public static Vector256 AddSaturate(Vector256 left, Vector256 right) => AddSaturate(left, right);
///
/// __m256i _mm256_adds_epi16 (__m256i a, __m256i b)
/// VPADDSW ymm, ymm, ymm/m256
///
public static Vector256 AddSaturate(Vector256 left, Vector256 right) => AddSaturate(left, right);
///
/// __m256i _mm256_adds_epu16 (__m256i a, __m256i b)
/// VPADDUSW ymm, ymm, ymm/m256
///
public static Vector256 AddSaturate(Vector256 left, Vector256 right) => AddSaturate(left, right);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_alignr_epi8 (__m256i a, __m256i b, const int count)
/// VPALIGNR ymm, ymm, ymm/m256, imm8
/// This intrinsic generates VPALIGNR that operates over bytes rather than elements of the vectors.
///
public static Vector256 AlignRight(Vector256 left, Vector256 right, byte mask) => AlignRight(left, right, mask);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_and_si256 (__m256i a, __m256i b)
/// VPAND ymm, ymm, ymm/m256
///
public static Vector256 And(Vector256 left, Vector256 right) => And(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_andnot_si256 (__m256i a, __m256i b)
/// VPANDN ymm, ymm, ymm/m256
///
public static Vector256 AndNot(Vector256 left, Vector256 right) => AndNot(left, right);
///
/// __m256i _mm256_avg_epu8 (__m256i a, __m256i b)
/// VPAVGB ymm, ymm, ymm/m256
///
public static Vector256 Average(Vector256 left, Vector256 right) => Average(left, right);
///
/// __m256i _mm256_avg_epu16 (__m256i a, __m256i b)
/// VPAVGW ymm, ymm, ymm/m256
///
public static Vector256 Average(Vector256 left, Vector256 right) => Average(left, right);
///
/// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8)
/// VPBLENDD xmm, xmm, xmm/m128, imm8
///
public static Vector128 Blend(Vector128 left, Vector128 right, byte control) => Blend(left, right, control);
///
/// __m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8)
/// VPBLENDD xmm, xmm, xmm/m128, imm8
///
public static Vector128 Blend(Vector128 left, Vector128 right, byte control) => Blend(left, right, control);
///
/// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8)
/// VPBLENDW ymm, ymm, ymm/m256, imm8
///
public static Vector256 Blend(Vector256 left, Vector256 right, byte control) => Blend(left, right, control);
///
/// __m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8)
/// VPBLENDW ymm, ymm, ymm/m256, imm8
///
public static Vector256 Blend(Vector256 left, Vector256 right, byte control) => Blend(left, right, control);
///
/// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8)
/// VPBLENDD ymm, ymm, ymm/m256, imm8
///
public static Vector256 Blend(Vector256 left, Vector256 right, byte control) => Blend(left, right, control);
///
/// __m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8)
/// VPBLENDD ymm, ymm, ymm/m256, imm8
///
public static Vector256 Blend(Vector256 left, Vector256 right, byte control) => Blend(left, right, control);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m256i _mm256_blendv_epi8 (__m256i a, __m256i b, __m256i mask)
/// VPBLENDVB ymm, ymm, ymm/m256, ymm
/// This intrinsic generates VPBLENDVB that needs a BYTE mask-vector, so users should correctly set each mask byte for the selected elements.
///
public static Vector256 BlendVariable(Vector256 left, Vector256 right, Vector256 mask) => BlendVariable(left, right, mask);
///
/// __m128i _mm_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB xmm, xmm
/// __m128i _mm_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW xmm, xmm
/// __m128i _mm_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD xmm, xmm
/// __m128i _mm_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ xmm, xmm
/// __m128 _mm_broadcastss_ps (__m128 a)
/// VBROADCASTSS xmm, xmm
/// __m128d _mm_broadcastsd_pd (__m128d a)
/// VMOVDDUP xmm, xmm
///
public static Vector128 BroadcastScalarToVector128(Vector128 value) where T : struct => BroadcastScalarToVector128(value);
///
/// __m128i _mm_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB xmm, m8
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(byte* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB xmm, m8
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(sbyte* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW xmm, m16
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(short* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW xmm, m16
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(ushort* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD xmm, m32
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(int* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD xmm, m32
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(uint* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ xmm, m64
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(long* source) => BroadcastScalarToVector128(source);
///
/// __m128i _mm_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ xmm, m64
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector128 BroadcastScalarToVector128(ulong* source) => BroadcastScalarToVector128(source);
///
/// __m256i _mm256_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB ymm, xmm
/// __m256i _mm256_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW ymm, xmm
/// __m256i _mm256_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD ymm, xmm
/// __m256i _mm256_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ ymm, xmm
/// __m256 _mm256_broadcastss_ps (__m128 a)
/// VBROADCASTSS ymm, xmm
/// __m256d _mm256_broadcastsd_pd (__m128d a)
/// VBROADCASTSD ymm, xmm
///
public static Vector256 BroadcastScalarToVector256(Vector128 value) where T : struct => BroadcastScalarToVector256(value);
///
/// __m256i _mm256_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB ymm, m8
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(byte* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastb_epi8 (__m128i a)
/// VPBROADCASTB ymm, m8
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(sbyte* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW ymm, m16
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(short* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastw_epi16 (__m128i a)
/// VPBROADCASTW ymm, m16
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(ushort* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD ymm, m32
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(int* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastd_epi32 (__m128i a)
/// VPBROADCASTD ymm, m32
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(uint* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ ymm, m64
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(long* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastq_epi64 (__m128i a)
/// VPBROADCASTQ ymm, m64
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastScalarToVector256(ulong* source) => BroadcastScalarToVector256(source);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(sbyte* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(byte* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(short* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(ushort* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(int* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(uint* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(long* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_broadcastsi128_si256 (__m128i a)
/// VBROADCASTI128 ymm, m128
/// The above native signature does not directly correspond to the managed signature.
///
public static unsafe Vector256 BroadcastVector128ToVector256(ulong* address) => BroadcastVector128ToVector256(address);
///
/// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b)
/// VPCMPEQB ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi8 (__m256i a, __m256i b)
/// VPCMPEQB ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b)
/// VPCMPEQW ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi16 (__m256i a, __m256i b)
/// VPCMPEQW ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b)
/// VPCMPEQD ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi32 (__m256i a, __m256i b)
/// VPCMPEQD ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b)
/// VPCMPEQQ ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b)
/// VPCMPEQQ ymm, ymm, ymm/m256
///
public static Vector256 CompareEqual(Vector256 left, Vector256 right) => CompareEqual(left, right);
///
/// __m256i _mm256_cmpgt_epi8 (__m256i a, __m256i b)
/// VPCMPGTB ymm, ymm, ymm/m256
///
public static Vector256 CompareGreaterThan(Vector256 left, Vector256 right) => CompareGreaterThan(left, right);
///
/// __m256i _mm256_cmpgt_epi16 (__m256i a, __m256i b)
/// VPCMPGTW ymm, ymm, ymm/m256
///
public static Vector256 CompareGreaterThan(Vector256 left, Vector256 right) => CompareGreaterThan(left, right);
///
/// __m256i _mm256_cmpgt_epi32 (__m256i a, __m256i b)
/// VPCMPGTD ymm, ymm, ymm/m256
///
public static Vector256 CompareGreaterThan(Vector256 left, Vector256 right) => CompareGreaterThan(left, right);
///
/// __m256i _mm256_cmpgt_epi64 (__m256i a, __m256i b)
/// VPCMPGTQ ymm, ymm, ymm/m256
///
public static Vector256 CompareGreaterThan(Vector256 left, Vector256 right) => CompareGreaterThan(left, right);
///
/// double _mm256_cvtsd_f64 (__m256d a)
/// HELPER: MOVSD
///
public static double ConvertToDouble(Vector256 value) => ConvertToDouble(value);
///
/// int _mm256_cvtsi256_si32 (__m256i a)
/// MOVD reg/m32, xmm
///
public static int ConvertToInt32(Vector256 value) => ConvertToInt32(value);
///
/// int _mm256_cvtsi256_si32 (__m256i a)
/// MOVD reg/m32, xmm
///
public static uint ConvertToUInt32(Vector256 value) => ConvertToUInt32(value);
///
/// __m256i _mm256_cvtepi8_epi16 (__m128i a)
/// VPMOVSXBW ymm, xmm/m128
///
public static Vector256 ConvertToVector256Int16(Vector128 value) => ConvertToVector256Int16(value);
///
/// __m256i _mm256_cvtepu8_epi16 (__m128i a)
/// VPMOVZXBW ymm, xmm/m128
///
public static Vector256 ConvertToVector256UInt16(Vector128 value) => ConvertToVector256UInt16(value);
///
/// __m256i _mm256_cvtepi8_epi32 (__m128i a)
/// VPMOVSXBD ymm, xmm/m128
///
public static Vector256 ConvertToVector256Int32(Vector128 value) => ConvertToVector256Int32(value);
///
/// __m256i _mm256_cvtepi16_epi32 (__m128i a)
/// VPMOVSXWD ymm, xmm/m128
///
public static Vector256 ConvertToVector256Int32(Vector128 value) => ConvertToVector256Int32(value);
///
/// __m256i _mm256_cvtepu8_epi32 (__m128i a)
/// VPMOVZXBD ymm, xmm/m128
///
public static Vector256 ConvertToVector256UInt32(Vector128 value) => ConvertToVector256UInt32(value);
///
/// __m256i _mm256_cvtepu16_epi32 (__m128i a)
/// VPMOVZXWD ymm, xmm/m128
///
public static Vector256 ConvertToVector256UInt32(Vector128 value) => ConvertToVector256UInt32(value);
///
/// __m256i _mm256_cvtepi8_epi64 (__m128i a)
/// VPMOVSXBQ ymm, xmm/m128
///
public static Vector256 ConvertToVector256Int64(Vector128