// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT.

//go:build goexperiment.simd

package simd
/* Absolute */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSB, CPU Feature: AVX
func ( x Int8x16 ) Absolute ( ) Int8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSB, CPU Feature: AVX2
func ( x Int8x32 ) Absolute ( ) Int8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Absolute ( ) Int8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSW, CPU Feature: AVX
func ( x Int16x8 ) Absolute ( ) Int16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSW, CPU Feature: AVX2
func ( x Int16x16 ) Absolute ( ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Absolute ( ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSD, CPU Feature: AVX
func ( x Int32x4 ) Absolute ( ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSD, CPU Feature: AVX2
func ( x Int32x8 ) Absolute ( ) Int32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Absolute ( ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) Absolute ( ) Int64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) Absolute ( ) Int64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Absolute computes the absolute value of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Absolute ( ) Int64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* AbsoluteMasked */
// Absolute computes the absolute value of each element.
//
// Asm: VPABSB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) AbsoluteMasked ( y Mask8x16 ) Int8x16
// Absolute computes the absolute value of each element.
//
// Asm: VPABSB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) AbsoluteMasked ( y Mask8x32 ) Int8x32
// Absolute computes the absolute value of each element.
//
// Asm: VPABSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) AbsoluteMasked ( y Mask8x64 ) Int8x64
// Absolute computes the absolute value of each element.
//
// Asm: VPABSW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) AbsoluteMasked ( y Mask16x8 ) Int16x8
// Absolute computes the absolute value of each element.
//
// Asm: VPABSW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) AbsoluteMasked ( y Mask16x16 ) Int16x16
// Absolute computes the absolute value of each element.
//
// Asm: VPABSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) AbsoluteMasked ( y Mask16x32 ) Int16x32
// Absolute computes the absolute value of each element.
//
// Asm: VPABSD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) AbsoluteMasked ( y Mask32x4 ) Int32x4
// Absolute computes the absolute value of each element.
//
// Asm: VPABSD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) AbsoluteMasked ( y Mask32x8 ) Int32x8
// Absolute computes the absolute value of each element.
//
// Asm: VPABSD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) AbsoluteMasked ( y Mask32x16 ) Int32x16
// Absolute computes the absolute value of each element.
//
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) AbsoluteMasked ( y Mask64x2 ) Int64x2
// Absolute computes the absolute value of each element.
//
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) AbsoluteMasked ( y Mask64x4 ) Int64x4
// Absolute computes the absolute value of each element.
//
// Asm: VPABSQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) AbsoluteMasked ( y Mask64x8 ) Int64x8
2025-06-16 20:11:27 +00:00
/* Add */
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPS, CPU Feature: AVX
func ( x Float32x4 ) Add ( y Float32x4 ) Float32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPS, CPU Feature: AVX
func ( x Float32x8 ) Add ( y Float32x8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Add ( y Float32x16 ) Float32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPD, CPU Feature: AVX
func ( x Float64x2 ) Add ( y Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPD, CPU Feature: AVX
func ( x Float64x4 ) Add ( y Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Add ( y Float64x8 ) Float64x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX
func ( x Int8x16 ) Add ( y Int8x16 ) Int8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX2
func ( x Int8x32 ) Add ( y Int8x32 ) Int8x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Add ( y Int8x64 ) Int8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX
func ( x Int16x8 ) Add ( y Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX2
func ( x Int16x16 ) Add ( y Int16x16 ) Int16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Add ( y Int16x32 ) Int16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX
func ( x Int32x4 ) Add ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX2
func ( x Int32x8 ) Add ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Add ( y Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX
func ( x Int64x2 ) Add ( y Int64x2 ) Int64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX2
func ( x Int64x4 ) Add ( y Int64x4 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Add ( y Int64x8 ) Int64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX
func ( x Uint8x16 ) Add ( y Uint8x16 ) Uint8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX2
func ( x Uint8x32 ) Add ( y Uint8x32 ) Uint8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Add ( y Uint8x64 ) Uint8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX
func ( x Uint16x8 ) Add ( y Uint16x8 ) Uint16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX2
func ( x Uint16x16 ) Add ( y Uint16x16 ) Uint16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Add ( y Uint16x32 ) Uint16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX
func ( x Uint32x4 ) Add ( y Uint32x4 ) Uint32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX2
func ( x Uint32x8 ) Add ( y Uint32x8 ) Uint32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Add ( y Uint32x16 ) Uint32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX
func ( x Uint64x2 ) Add ( y Uint64x2 ) Uint64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX2
func ( x Uint64x4 ) Add ( y Uint64x4 ) Uint64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Add adds corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Add ( y Uint64x8 ) Uint64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* AddMasked */
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) AddMasked ( y Float32x4 , z Mask32x4 ) Float32x4
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) AddMasked ( y Float32x8 , z Mask32x8 ) Float32x8
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) AddMasked ( y Float32x16 , z Mask32x16 ) Float32x16
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) AddMasked ( y Float64x2 , z Mask64x2 ) Float64x2
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) AddMasked ( y Float64x4 , z Mask64x4 ) Float64x4
// Add adds corresponding elements of two vectors.
//
// Asm: VADDPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) AddMasked ( y Float64x8 , z Mask64x8 ) Float64x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) AddMasked ( y Int8x16 , z Mask8x16 ) Int8x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) AddMasked ( y Int8x32 , z Mask8x32 ) Int8x32
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) AddMasked ( y Int8x64 , z Mask8x64 ) Int8x64
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) AddMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) AddMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) AddMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) AddMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) AddMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) AddMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) AddMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) AddMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) AddMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) AddMasked ( y Uint8x16 , z Mask8x16 ) Uint8x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) AddMasked ( y Uint8x32 , z Mask8x32 ) Uint8x32
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) AddMasked ( y Uint8x64 , z Mask8x64 ) Uint8x64
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) AddMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) AddMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) AddMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) AddMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) AddMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) AddMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) AddMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) AddMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// Add adds corresponding elements of two vectors.
//
// Asm: VPADDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) AddMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-16 20:11:27 +00:00
/* AddSub */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AddSub subtracts even elements and adds odd elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDSUBPS, CPU Feature: AVX
func ( x Float32x4 ) AddSub ( y Float32x4 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AddSub subtracts even elements and adds odd elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDSUBPS, CPU Feature: AVX
func ( x Float32x8 ) AddSub ( y Float32x8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AddSub subtracts even elements and adds odd elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDSUBPD, CPU Feature: AVX
func ( x Float64x2 ) AddSub ( y Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AddSub subtracts even elements and adds odd elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VADDSUBPD, CPU Feature: AVX
func ( x Float64x4 ) AddSub ( y Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
/* And */
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Int8x16 ) And ( y Int8x16 ) Int8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Int8x32 ) And ( y Int8x32 ) Int8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Int16x8 ) And ( y Int16x8 ) Int16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Int16x16 ) And ( y Int16x16 ) Int16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Int32x4 ) And ( y Int32x4 ) Int32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Int32x8 ) And ( y Int32x8 ) Int32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a masked bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) And ( y Int32x16 ) Int32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Int64x2 ) And ( y Int64x2 ) Int64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Int64x4 ) And ( y Int64x4 ) Int64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a masked bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) And ( y Int64x8 ) Int64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Uint8x16 ) And ( y Uint8x16 ) Uint8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Uint8x32 ) And ( y Uint8x32 ) Uint8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Uint16x8 ) And ( y Uint16x8 ) Uint16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Uint16x16 ) And ( y Uint16x16 ) Uint16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Uint32x4 ) And ( y Uint32x4 ) Uint32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Uint32x8 ) And ( y Uint32x8 ) Uint32x8
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// And performs a masked bitwise AND operation between two vectors.
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) And ( y Uint32x16 ) Uint32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX
func ( x Uint64x2 ) And ( y Uint64x2 ) Uint64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a bitwise AND operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAND, CPU Feature: AVX2
func ( x Uint64x4 ) And ( y Uint64x4 ) Uint64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// And performs a masked bitwise AND operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) And ( y Uint64x8 ) Uint64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* AndMasked */
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) AndMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) AndMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) AndMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) AndMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) AndMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) AndMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) AndMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) AndMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) AndMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) AndMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) AndMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// And performs a masked bitwise AND operation between two vectors.
//
// Asm: VPANDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) AndMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-16 20:11:27 +00:00
/* AndNot */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Int8x16 ) AndNot ( y Int8x16 ) Int8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Int8x32 ) AndNot ( y Int8x32 ) Int8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Int16x8 ) AndNot ( y Int16x8 ) Int16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Int16x16 ) AndNot ( y Int16x16 ) Int16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Int32x4 ) AndNot ( y Int32x4 ) Int32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Int32x8 ) AndNot ( y Int32x8 ) Int32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Int32x16 ) AndNot ( y Int32x16 ) Int32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Int64x2 ) AndNot ( y Int64x2 ) Int64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Int64x4 ) AndNot ( y Int64x4 ) Int64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) AndNot ( y Int64x8 ) Int64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Uint8x16 ) AndNot ( y Uint8x16 ) Uint8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Uint8x32 ) AndNot ( y Uint8x32 ) Uint8x32
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Uint16x8 ) AndNot ( y Uint16x8 ) Uint16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Uint16x16 ) AndNot ( y Uint16x16 ) Uint16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Uint32x4 ) AndNot ( y Uint32x4 ) Uint32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Uint32x8 ) AndNot ( y Uint32x8 ) Uint32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) AndNot ( y Uint32x16 ) Uint32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX
func ( x Uint64x2 ) AndNot ( y Uint64x2 ) Uint64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDN, CPU Feature: AVX2
func ( x Uint64x4 ) AndNot ( y Uint64x4 ) Uint64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) AndNot ( y Uint64x8 ) Uint64x8
2025-07-08 18:18:55 +00:00
/* AndNotMasked */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Int32x4 ) AndNotMasked ( y Int32x4 , z Mask32x4 ) Int32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Int32x8 ) AndNotMasked ( y Int32x8 , z Mask32x8 ) Int32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Int32x16 ) AndNotMasked ( y Int32x16 , z Mask32x16 ) Int32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) AndNotMasked ( y Int64x2 , z Mask64x2 ) Int64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) AndNotMasked ( y Int64x4 , z Mask64x4 ) Int64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) AndNotMasked ( y Int64x8 , z Mask64x8 ) Int64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) AndNotMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) AndNotMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDND, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) AndNotMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) AndNotMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) AndNotMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// AndNot performs a masked bitwise AND NOT operation between two vectors.
//
// Asm: VPANDNQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) AndNotMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8

/* ApproximateReciprocal */

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x4) ApproximateReciprocal() Float32x4

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x8) ApproximateReciprocal() Float32x8

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x16) ApproximateReciprocal() Float32x16

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x2) ApproximateReciprocal() Float64x2

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x4) ApproximateReciprocal() Float64x4

// ApproximateReciprocal computes an approximate reciprocal of each element.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x8) ApproximateReciprocal() Float64x8

/* ApproximateReciprocalMasked */

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PS, CPU Feature: AVX512EVEX
func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4

// ApproximateReciprocalMasked computes an approximate reciprocal of each element, masked.
//
// Asm: VRCP14PD, CPU Feature: AVX512EVEX
func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8
/* ApproximateReciprocalOfSqrt */
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRTPS, CPU Feature: AVX
func ( x Float32x4 ) ApproximateReciprocalOfSqrt ( ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRTPS, CPU Feature: AVX
func ( x Float32x8 ) ApproximateReciprocalOfSqrt ( ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) ApproximateReciprocalOfSqrt ( ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) ApproximateReciprocalOfSqrt ( ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) ApproximateReciprocalOfSqrt ( ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) ApproximateReciprocalOfSqrt ( ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* ApproximateReciprocalOfSqrtMasked */
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) ApproximateReciprocalOfSqrtMasked ( y Mask32x4 ) Float32x4
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) ApproximateReciprocalOfSqrtMasked ( y Mask32x8 ) Float32x8
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) ApproximateReciprocalOfSqrtMasked ( y Mask32x16 ) Float32x16
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) ApproximateReciprocalOfSqrtMasked ( y Mask64x2 ) Float64x2
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) ApproximateReciprocalOfSqrtMasked ( y Mask64x4 ) Float64x4
// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element.
//
// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) ApproximateReciprocalOfSqrtMasked ( y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* Average */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGB, CPU Feature: AVX
func ( x Uint8x16 ) Average ( y Uint8x16 ) Uint8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGB, CPU Feature: AVX2
func ( x Uint8x32 ) Average ( y Uint8x32 ) Uint8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Average ( y Uint8x64 ) Uint8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGW, CPU Feature: AVX
func ( x Uint16x8 ) Average ( y Uint16x8 ) Uint16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGW, CPU Feature: AVX2
func ( x Uint16x16 ) Average ( y Uint16x16 ) Uint16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Average computes the rounded average of corresponding elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPAVGW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Average ( y Uint16x32 ) Uint16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* AverageMasked */
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) AverageMasked ( y Uint8x16 , z Mask8x16 ) Uint8x16
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) AverageMasked ( y Uint8x32 , z Mask8x32 ) Uint8x32
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) AverageMasked ( y Uint8x64 , z Mask8x64 ) Uint8x64
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) AverageMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) AverageMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// Average computes the rounded average of corresponding elements.
//
// Asm: VPAVGW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) AverageMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
/* Ceil */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Ceil rounds elements up to the nearest integer.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x4 ) Ceil ( ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Ceil rounds elements up to the nearest integer.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x8 ) Ceil ( ) Float32x8
// Ceil rounds elements up to the nearest integer.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x2 ) Ceil ( ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Ceil rounds elements up to the nearest integer.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x4 ) Ceil ( ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
/* CeilWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) CeilWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) CeilWithPrecision ( imm uint8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) CeilWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) CeilWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) CeilWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// CeilWithPrecision rounds elements up with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) CeilWithPrecision ( imm uint8 ) Float64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* CeilWithPrecisionMasked */
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) CeilWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) CeilWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) CeilWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) CeilWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) CeilWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// CeilWithPrecision rounds elements up with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) CeilWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* DiffWithCeilWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) DiffWithCeilWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) DiffWithCeilWithPrecision ( imm uint8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) DiffWithCeilWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) DiffWithCeilWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) DiffWithCeilWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) DiffWithCeilWithPrecision ( imm uint8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* DiffWithCeilWithPrecisionMasked */
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) DiffWithCeilWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* DiffWithFloorWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) DiffWithFloorWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) DiffWithFloorWithPrecision ( imm uint8 ) Float32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) DiffWithFloorWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) DiffWithFloorWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) DiffWithFloorWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) DiffWithFloorWithPrecision ( imm uint8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* DiffWithFloorWithPrecisionMasked */
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// DiffWithFloorWithPrecision computes the difference after flooring with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) DiffWithFloorWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* DiffWithRoundWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) DiffWithRoundWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) DiffWithRoundWithPrecision ( imm uint8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) DiffWithRoundWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) DiffWithRoundWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) DiffWithRoundWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) DiffWithRoundWithPrecision ( imm uint8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* DiffWithRoundWithPrecisionMasked */
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// DiffWithRoundWithPrecision computes the difference after rounding with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) DiffWithRoundWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* DiffWithTruncWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) DiffWithTruncWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) DiffWithTruncWithPrecision ( imm uint8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) DiffWithTruncWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) DiffWithTruncWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) DiffWithTruncWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) DiffWithTruncWithPrecision ( imm uint8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* DiffWithTruncWithPrecisionMasked */
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// DiffWithTruncWithPrecision computes the difference after truncating with specified precision.
//
// Asm: VREDUCEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) DiffWithTruncWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* Div */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPS, CPU Feature: AVX
func ( x Float32x4 ) Div ( y Float32x4 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPS, CPU Feature: AVX
func ( x Float32x8 ) Div ( y Float32x8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Div ( y Float32x16 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPD, CPU Feature: AVX
func ( x Float64x2 ) Div ( y Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPD, CPU Feature: AVX
func ( x Float64x4 ) Div ( y Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Div divides elements of two vectors.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDIVPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Div ( y Float64x8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* DivMasked */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Div divides elements of two vectors.
//
// Asm: VDIVPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) DivMasked ( y Float32x4 , z Mask32x4 ) Float32x4
// Div divides elements of two vectors.
//
// Asm: VDIVPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) DivMasked ( y Float32x8 , z Mask32x8 ) Float32x8
// Div divides elements of two vectors.
//
// Asm: VDIVPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) DivMasked ( y Float32x16 , z Mask32x16 ) Float32x16
// Div divides elements of two vectors.
//
// Asm: VDIVPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) DivMasked ( y Float64x2 , z Mask64x2 ) Float64x2
// Div divides elements of two vectors.
//
// Asm: VDIVPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) DivMasked ( y Float64x4 , z Mask64x4 ) Float64x4
// Div divides elements of two vectors.
//
// Asm: VDIVPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) DivMasked ( y Float64x8 , z Mask64x8 ) Float64x8
/* DotProdBroadcast */
// DotProdBroadcast multiplies all elements and broadcasts the sum.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VDPPD, CPU Feature: AVX
func ( x Float64x2 ) DotProdBroadcast ( y Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
/* Equal */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQB, CPU Feature: AVX
func ( x Int8x16 ) Equal ( y Int8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQB, CPU Feature: AVX2
func ( x Int8x32 ) Equal ( y Int8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQW, CPU Feature: AVX
func ( x Int16x8 ) Equal ( y Int16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPEQW, CPU Feature: AVX2
2025-05-22 19:59:12 +00:00
func ( x Int16x16 ) Equal ( y Int16x16 ) Mask16x16
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQD, CPU Feature: AVX
func ( x Int32x4 ) Equal ( y Int32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQD, CPU Feature: AVX2
func ( x Int32x8 ) Equal ( y Int32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQQ, CPU Feature: AVX
func ( x Int64x2 ) Equal ( y Int64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPEQQ, CPU Feature: AVX2
func ( x Int64x4 ) Equal ( y Int64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-09 16:24:34 +00:00
// Equal compares for equality.
//
// Asm: VPCMPEQB, CPU Feature: AVX
func ( x Uint8x16 ) Equal ( y Uint8x16 ) Mask8x16
// Equal compares for equality.
//
// Asm: VPCMPEQB, CPU Feature: AVX2
func ( x Uint8x32 ) Equal ( y Uint8x32 ) Mask8x32
// Equal compares for equality.
//
// Asm: VPCMPEQW, CPU Feature: AVX
func ( x Uint16x8 ) Equal ( y Uint16x8 ) Mask16x8
// Equal compares for equality.
//
// Asm: VPCMPEQW, CPU Feature: AVX2
func ( x Uint16x16 ) Equal ( y Uint16x16 ) Mask16x16
// Equal compares for equality.
//
// Asm: VPCMPEQD, CPU Feature: AVX
func ( x Uint32x4 ) Equal ( y Uint32x4 ) Mask32x4
// Equal compares for equality.
//
// Asm: VPCMPEQD, CPU Feature: AVX2
func ( x Uint32x8 ) Equal ( y Uint32x8 ) Mask32x8
// Equal compares for equality.
//
// Asm: VPCMPEQQ, CPU Feature: AVX
func ( x Uint64x2 ) Equal ( y Uint64x2 ) Mask64x2
// Equal compares for equality.
//
// Asm: VPCMPEQQ, CPU Feature: AVX2
func ( x Uint64x4 ) Equal ( y Uint64x4 ) Mask64x4
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) Equal ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) Equal ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Equal ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) Equal ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) Equal ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Equal ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Equal ( y Int8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Equal ( y Int16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 16:24:24 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Equal ( y Int32x16 ) Mask32x16
2025-06-12 16:24:24 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Equal ( y Int64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Equal ( y Uint8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Equal ( y Uint16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Equal ( y Uint32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Equal compares for equality, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Equal ( y Uint64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* EqualMasked */
// Equal compares for equality, masked.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) EqualMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
// Equal compares for equality, masked.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) EqualMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
// Equal compares for equality, masked.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) EqualMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
// Equal compares for equality, masked.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) EqualMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
// Equal compares for equality, masked.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) EqualMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
// Equal compares for equality, masked.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) EqualMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
// Equal compares for equality, masked.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) EqualMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
// Equal compares for equality, masked.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) EqualMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
// Equal compares for equality, masked.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) EqualMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
// Equal compares for equality, masked.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) EqualMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
// Equal compares for equality, masked.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) EqualMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
// Equal compares for equality, masked.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) EqualMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
// Equal compares for equality, masked.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) EqualMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
// Equal compares for equality, masked.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) EqualMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
// Equal compares for equality, masked.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) EqualMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
// Equal compares for equality, masked.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) EqualMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
// Equal compares for equality, masked.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) EqualMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
// Equal compares for equality, masked.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) EqualMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
// Equal compares for equality, masked.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) EqualMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
// Equal compares for equality, masked.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) EqualMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
// Equal compares for equality, masked.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) EqualMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
// Equal compares for equality, masked.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) EqualMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
// Equal compares for equality, masked.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) EqualMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
// Equal compares for equality, masked.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) EqualMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
// Equal compares for equality, masked.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) EqualMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
// Equal compares for equality, masked.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) EqualMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
// Equal compares for equality, masked.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) EqualMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
// Equal compares for equality, masked.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) EqualMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
// Equal compares for equality, masked.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) EqualMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
// Equal compares for equality, masked.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) EqualMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-06-16 20:11:27 +00:00
/* Floor */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Floor rounds elements down to the nearest integer.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x4 ) Floor ( ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Floor rounds elements down to the nearest integer.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x8 ) Floor ( ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Floor rounds elements down to the nearest integer.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x2 ) Floor ( ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Floor rounds elements down to the nearest integer.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x4 ) Floor ( ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
/* FloorWithPrecision */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) FloorWithPrecision ( imm uint8 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) FloorWithPrecision ( imm uint8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) FloorWithPrecision ( imm uint8 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) FloorWithPrecision ( imm uint8 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) FloorWithPrecision ( imm uint8 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// FloorWithPrecision rounds elements down with specified precision, masked.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) FloorWithPrecision ( imm uint8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* FloorWithPrecisionMasked */
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) FloorWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) FloorWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) FloorWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) FloorWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) FloorWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// FloorWithPrecision rounds elements down with specified precision, masked.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) FloorWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-17 19:31:11 +00:00
/* FusedMultiplyAdd */
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x4 ) FusedMultiplyAdd ( y Float32x4 , z Float32x4 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 16:24:24 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x8 ) FusedMultiplyAdd ( y Float32x8 , z Float32x8 ) Float32x8
2025-06-12 16:24:24 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x16 ) FusedMultiplyAdd ( y Float32x16 , z Float32x16 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x2 ) FusedMultiplyAdd ( y Float64x2 , z Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x4 ) FusedMultiplyAdd ( y Float64x4 , z Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x8 ) FusedMultiplyAdd ( y Float64x8 , z Float64x8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* FusedMultiplyAddMasked */
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) FusedMultiplyAddMasked ( y Float32x4 , z Float32x4 , u Mask32x4 ) Float32x4
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) FusedMultiplyAddMasked ( y Float32x8 , z Float32x8 , u Mask32x8 ) Float32x8
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) FusedMultiplyAddMasked ( y Float32x16 , z Float32x16 , u Mask32x16 ) Float32x16
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) FusedMultiplyAddMasked ( y Float64x2 , z Float64x2 , u Mask64x2 ) Float64x2
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) FusedMultiplyAddMasked ( y Float64x4 , z Float64x4 , u Mask64x4 ) Float64x4
// FusedMultiplyAdd performs `(v1 * v2) + v3`.
//
// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) FusedMultiplyAddMasked ( y Float64x8 , z Float64x8 , u Mask64x8 ) Float64x8
2025-06-17 19:31:11 +00:00
/* FusedMultiplyAddSub */
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x4 ) FusedMultiplyAddSub ( y Float32x4 , z Float32x4 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x8 ) FusedMultiplyAddSub ( y Float32x8 , z Float32x8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x16 ) FusedMultiplyAddSub ( y Float32x16 , z Float32x16 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x2 ) FusedMultiplyAddSub ( y Float64x2 , z Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x4 ) FusedMultiplyAddSub ( y Float64x4 , z Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x8 ) FusedMultiplyAddSub ( y Float64x8 , z Float64x8 ) Float64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* FusedMultiplyAddSubMasked */
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) FusedMultiplyAddSubMasked ( y Float32x4 , z Float32x4 , u Mask32x4 ) Float32x4
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) FusedMultiplyAddSubMasked ( y Float32x8 , z Float32x8 , u Mask32x8 ) Float32x8
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) FusedMultiplyAddSubMasked ( y Float32x16 , z Float32x16 , u Mask32x16 ) Float32x16
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) FusedMultiplyAddSubMasked ( y Float64x2 , z Float64x2 , u Mask64x2 ) Float64x2
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) FusedMultiplyAddSubMasked ( y Float64x4 , z Float64x4 , u Mask64x4 ) Float64x4
// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
//
// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) FusedMultiplyAddSubMasked ( y Float64x8 , z Float64x8 , u Mask64x8 ) Float64x8
2025-06-17 19:31:11 +00:00
/* FusedMultiplySubAdd */
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x4 ) FusedMultiplySubAdd ( y Float32x4 , z Float32x4 ) Float32x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x8 ) FusedMultiplySubAdd ( y Float32x8 , z Float32x8 ) Float32x8
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float32x16 ) FusedMultiplySubAdd ( y Float32x16 , z Float32x16 ) Float32x16
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x2 ) FusedMultiplySubAdd ( y Float64x2 , z Float64x2 ) Float64x2
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x4 ) FusedMultiplySubAdd ( y Float64x4 , z Float64x4 ) Float64x4
2025-05-22 19:59:12 +00:00
2025-06-17 19:31:11 +00:00
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
2025-06-17 19:31:11 +00:00
func ( x Float64x8 ) FusedMultiplySubAdd ( y Float64x8 , z Float64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* FusedMultiplySubAddMasked */
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) FusedMultiplySubAddMasked ( y Float32x4 , z Float32x4 , u Mask32x4 ) Float32x4
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) FusedMultiplySubAddMasked ( y Float32x8 , z Float32x8 , u Mask32x8 ) Float32x8
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) FusedMultiplySubAddMasked ( y Float32x16 , z Float32x16 , u Mask32x16 ) Float32x16
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) FusedMultiplySubAddMasked ( y Float64x2 , z Float64x2 , u Mask64x2 ) Float64x2
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) FusedMultiplySubAddMasked ( y Float64x4 , z Float64x4 , u Mask64x4 ) Float64x4
// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
//
// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) FusedMultiplySubAddMasked ( y Float64x8 , z Float64x8 , u Mask64x8 ) Float64x8
2025-06-26 04:07:48 +00:00
/* GaloisFieldAffineTransform */
// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x16 ) GaloisFieldAffineTransform ( y Uint64x2 , b uint8 ) Uint8x16
2025-06-26 04:07:48 +00:00
// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x32 ) GaloisFieldAffineTransform ( y Uint64x4 , b uint8 ) Uint8x32
2025-06-26 04:07:48 +00:00
// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x64 ) GaloisFieldAffineTransform ( y Uint64x8 , b uint8 ) Uint8x64
2025-06-26 04:07:48 +00:00
/* GaloisFieldAffineTransformInversed */
2025-07-08 18:18:55 +00:00
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
2025-06-26 04:07:48 +00:00
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x16 ) GaloisFieldAffineTransformInversed ( y Uint64x2 , b uint8 ) Uint8x16
2025-06-26 04:07:48 +00:00
2025-07-08 18:18:55 +00:00
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
2025-06-26 04:07:48 +00:00
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x32 ) GaloisFieldAffineTransformInversed ( y Uint64x4 , b uint8 ) Uint8x32
2025-06-26 04:07:48 +00:00
2025-07-08 18:18:55 +00:00
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
2025-06-26 04:07:48 +00:00
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
2025-06-28 11:05:44 -04:00
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
2025-06-26 04:07:48 +00:00
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
2025-06-28 11:05:44 -04:00
func ( x Uint8x64 ) GaloisFieldAffineTransformInversed ( y Uint64x8 , b uint8 ) Uint8x64
2025-06-26 04:07:48 +00:00
2025-07-08 18:18:55 +00:00
/* GaloisFieldAffineTransformInversedMasked */
2025-06-26 04:07:48 +00:00
2025-07-08 18:18:55 +00:00
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
2025-06-26 04:07:48 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) GaloisFieldAffineTransformInversedMasked ( y Uint64x2 , b uint8 , m Mask8x16 ) Uint8x16
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) GaloisFieldAffineTransformInversedMasked ( y Uint64x4 , b uint8 , m Mask8x32 ) Uint8x32
// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8),
// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
//
// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) GaloisFieldAffineTransformInversedMasked ( y Uint64x8 , b uint8 , m Mask8x64 ) Uint8x64
/* GaloisFieldAffineTransformMasked */

// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
// The operation is applied selectively under the write mask m.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16

// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
// The operation is applied selectively under the write mask m.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32

// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8):
// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
// corresponding to a group of 8 elements in x.
// The operation is applied selectively under the write mask m.
//
// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64
/* GaloisFieldMul */
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
2025-06-26 04:07:48 +00:00
func ( x Uint8x16 ) GaloisFieldMul ( y Uint8x16 ) Uint8x16
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) GaloisFieldMul ( y Uint8x32 ) Uint8x32
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) GaloisFieldMul ( y Uint8x64 ) Uint8x64
2025-07-08 18:18:55 +00:00
/* GaloisFieldMulMasked */
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) GaloisFieldMulMasked ( y Uint8x16 , z Mask8x16 ) Uint8x16
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) GaloisFieldMulMasked ( y Uint8x32 , z Mask8x32 ) Uint8x32
// GaloisFieldMul computes element-wise GF(2^8) multiplication with
// reduction polynomial x^8 + x^4 + x^3 + x + 1.
//
// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) GaloisFieldMulMasked ( y Uint8x64 , z Mask8x64 ) Uint8x64
2025-06-25 18:20:50 -04:00
/* Get128 */
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTF128, CPU Feature: AVX
func ( x Float32x8 ) Get128 ( imm uint8 ) Float32x4
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTF128, CPU Feature: AVX
func ( x Float64x4 ) Get128 ( imm uint8 ) Float64x2
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Int8x32 ) Get128 ( imm uint8 ) Int8x16
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Int16x16 ) Get128 ( imm uint8 ) Int16x8
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Int32x8 ) Get128 ( imm uint8 ) Int32x4
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Int64x4 ) Get128 ( imm uint8 ) Int64x2
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Uint8x32 ) Get128 ( imm uint8 ) Uint8x16
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Uint16x16 ) Get128 ( imm uint8 ) Uint16x8
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Uint32x8 ) Get128 ( imm uint8 ) Uint32x4
// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
//
// Asm: VEXTRACTI128, CPU Feature: AVX2
func ( x Uint64x4 ) Get128 ( imm uint8 ) Uint64x2
2025-06-24 18:29:38 -04:00
/* GetElem */
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRB, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int8x16 ) GetElem ( imm uint8 ) int8
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRW, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int16x8 ) GetElem ( imm uint8 ) int16
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRD, CPU Feature: AVX
2025-06-28 10:20:53 -04:00
func ( x Int32x4 ) GetElem ( imm uint8 ) int32
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRQ, CPU Feature: AVX
2025-06-28 10:20:53 -04:00
func ( x Int64x2 ) GetElem ( imm uint8 ) int64
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRB, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint8x16 ) GetElem ( imm uint8 ) uint8
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRW, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint16x8 ) GetElem ( imm uint8 ) uint16
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRD, CPU Feature: AVX
2025-06-28 10:20:53 -04:00
func ( x Uint32x4 ) GetElem ( imm uint8 ) uint32
2025-06-24 18:29:38 -04:00
// GetElem retrieves a single constant-indexed element's value.
//
// Asm: VPEXTRQ, CPU Feature: AVX
2025-06-28 10:20:53 -04:00
func ( x Uint64x2 ) GetElem ( imm uint8 ) uint64
2025-06-24 18:29:38 -04:00
2025-06-16 20:11:27 +00:00
/* Greater */
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTB, CPU Feature: AVX
func ( x Int8x16 ) Greater ( y Int8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTB, CPU Feature: AVX2
func ( x Int8x32 ) Greater ( y Int8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTW, CPU Feature: AVX
func ( x Int16x8 ) Greater ( y Int16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTW, CPU Feature: AVX2
func ( x Int16x16 ) Greater ( y Int16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTD, CPU Feature: AVX
func ( x Int32x4 ) Greater ( y Int32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTD, CPU Feature: AVX2
func ( x Int32x8 ) Greater ( y Int32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-09 16:24:34 +00:00
// Greater compares for greater than.
//
// Asm: VPCMPGTQ, CPU Feature: AVX
func ( x Int64x2 ) Greater ( y Int64x2 ) Int64x2
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPGTQ, CPU Feature: AVX2
func ( x Int64x4 ) Greater ( y Int64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) Greater ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) Greater ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Greater ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) Greater ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) Greater ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Greater ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Greater ( y Int8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Greater ( y Int16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Greater ( y Int32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Greater ( y Int64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) Greater ( y Uint8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) Greater ( y Uint8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Greater ( y Uint8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) Greater ( y Uint16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) Greater ( y Uint16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Greater ( y Uint16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) Greater ( y Uint32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) Greater ( y Uint32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Greater ( y Uint32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) Greater ( y Uint64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) Greater ( y Uint64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Greater ( y Uint64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
/* GreaterEqual */
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) GreaterEqual ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) GreaterEqual ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) GreaterEqual ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) GreaterEqual ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) GreaterEqual ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) GreaterEqual ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) GreaterEqual ( y Int8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-05-22 19:59:12 +00:00
func ( x Int8x32 ) GreaterEqual ( y Int8x32 ) Mask8x32
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-06-16 20:11:27 +00:00
func ( x Int8x64 ) GreaterEqual ( y Int8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) GreaterEqual ( y Int16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) GreaterEqual ( y Int16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) GreaterEqual ( y Int16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) GreaterEqual ( y Int32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) GreaterEqual ( y Int32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) GreaterEqual ( y Int32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) GreaterEqual ( y Int64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) GreaterEqual ( y Int64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) GreaterEqual ( y Int64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) GreaterEqual ( y Uint8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) GreaterEqual ( y Uint8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) GreaterEqual ( y Uint8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) GreaterEqual ( y Uint16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) GreaterEqual ( y Uint16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) GreaterEqual ( y Uint16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-12 16:42:02 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) GreaterEqual ( y Uint32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) GreaterEqual ( y Uint32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) GreaterEqual ( y Uint32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) GreaterEqual ( y Uint64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) GreaterEqual ( y Uint64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-06-16 20:11:27 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) GreaterEqual ( y Uint64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* GreaterEqualMasked */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x4 ) GreaterEqualMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) GreaterEqualMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x16 ) GreaterEqualMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) GreaterEqualMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) GreaterEqualMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x8 ) GreaterEqualMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x16 ) GreaterEqualMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x32 ) GreaterEqualMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x64 ) GreaterEqualMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x8 ) GreaterEqualMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x16 ) GreaterEqualMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x32 ) GreaterEqualMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x4 ) GreaterEqualMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x8 ) GreaterEqualMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x16 ) GreaterEqualMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:24:24 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x2 ) GreaterEqualMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
2025-06-12 16:24:24 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x4 ) GreaterEqualMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x8 ) GreaterEqualMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x16 ) GreaterEqualMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x32 ) GreaterEqualMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x64 ) GreaterEqualMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x8 ) GreaterEqualMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x16 ) GreaterEqualMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) GreaterEqualMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x4 ) GreaterEqualMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x8 ) GreaterEqualMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) GreaterEqualMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) GreaterEqualMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:24:24 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) GreaterEqualMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
2025-06-12 16:24:24 +00:00
2025-07-08 18:18:55 +00:00
// GreaterEqual compares for greater than or equal.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) GreaterEqualMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* GreaterMasked */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) GreaterMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) GreaterMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x16 ) GreaterMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) GreaterMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) GreaterMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x8 ) GreaterMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x16 ) GreaterMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x32 ) GreaterMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x64 ) GreaterMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x8 ) GreaterMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x16 ) GreaterMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x32 ) GreaterMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x4 ) GreaterMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x8 ) GreaterMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x16 ) GreaterMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x2 ) GreaterMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x4 ) GreaterMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x8 ) GreaterMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:24:24 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x16 ) GreaterMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
2025-06-12 16:24:24 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x32 ) GreaterMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x64 ) GreaterMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x8 ) GreaterMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x16 ) GreaterMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) GreaterMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x4 ) GreaterMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x8 ) GreaterMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) GreaterMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) GreaterMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 03:54:34 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) GreaterMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Greater compares for greater than.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) GreaterMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* IsNan */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) IsNan ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) IsNan ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) IsNan ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) IsNan ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) IsNan ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) IsNan ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* IsNanMasked */
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) IsNanMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) IsNanMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) IsNanMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) IsNanMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) IsNanMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// IsNan checks if elements are NaN. Use as x.IsNan(x).
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) IsNanMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* Less */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) Less ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) Less ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Less ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) Less ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) Less ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Less ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) Less ( y Int8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) Less ( y Int8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Less ( y Int8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) Less ( y Int16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) Less ( y Int16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Less ( y Int16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) Less ( y Int32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) Less ( y Int32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Less ( y Int32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) Less ( y Int64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) Less ( y Int64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Less ( y Int64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) Less ( y Uint8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) Less ( y Uint8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Less ( y Uint8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) Less ( y Uint16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) Less ( y Uint16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Less ( y Uint16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) Less ( y Uint32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) Less ( y Uint32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Less ( y Uint32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) Less ( y Uint64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) Less ( y Uint64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Less ( y Uint64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* LessEqual */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) LessEqual ( y Float32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) LessEqual ( y Float32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) LessEqual ( y Float32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) LessEqual ( y Float64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) LessEqual ( y Float64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) LessEqual ( y Float64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) LessEqual ( y Int8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) LessEqual ( y Int8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) LessEqual ( y Int8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) LessEqual ( y Int16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) LessEqual ( y Int16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) LessEqual ( y Int16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) LessEqual ( y Int32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) LessEqual ( y Int32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) LessEqual ( y Int32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) LessEqual ( y Int64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) LessEqual ( y Int64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) LessEqual ( y Int64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) LessEqual ( y Uint8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) LessEqual ( y Uint8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) LessEqual ( y Uint8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) LessEqual ( y Uint16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) LessEqual ( y Uint16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) LessEqual ( y Uint16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) LessEqual ( y Uint32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) LessEqual ( y Uint32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) LessEqual ( y Uint32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) LessEqual ( y Uint64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) LessEqual ( y Uint64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) LessEqual ( y Uint64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
/* LessEqualMasked */
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) LessEqualMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) LessEqualMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) LessEqualMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) LessEqualMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) LessEqualMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) LessEqualMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) LessEqualMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) LessEqualMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) LessEqualMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) LessEqualMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) LessEqualMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) LessEqualMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) LessEqualMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) LessEqualMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) LessEqualMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) LessEqualMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) LessEqualMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) LessEqualMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
2025-06-12 16:24:24 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) LessEqualMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) LessEqualMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) LessEqualMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) LessEqualMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) LessEqualMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 03:54:34 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) LessEqualMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) LessEqualMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) LessEqualMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
2025-05-22 19:59:12 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:45:00 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) LessEqualMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:45:00 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) LessEqualMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:45:00 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) LessEqualMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// LessEqual compares for less than or equal.
2025-06-12 16:45:00 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) LessEqualMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
/* LessMasked */
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x4 ) LessMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x8 ) LessMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x16 ) LessMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x2 ) LessMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x4 ) LessMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:45:00 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VCMPPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x8 ) LessMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
2025-06-12 16:45:00 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) LessMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) LessMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) LessMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) LessMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) LessMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) LessMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) LessMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) LessMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) LessMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) LessMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) LessMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) LessMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) LessMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) LessMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) LessMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) LessMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) LessMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) LessMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) LessMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) LessMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) LessMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) LessMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) LessMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Less compares for less than.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) LessMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* Max */
// Max computes the maximum of corresponding elements.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPS, CPU Feature: AVX
func ( x Float32x4 ) Max ( y Float32x4 ) Float32x4
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPS, CPU Feature: AVX
func ( x Float32x8 ) Max ( y Float32x8 ) Float32x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Max ( y Float32x16 ) Float32x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VMAXPD, CPU Feature: AVX
func ( x Float64x2 ) Max ( y Float64x2 ) Float64x2
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPD, CPU Feature: AVX
func ( x Float64x4 ) Max ( y Float64x4 ) Float64x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Max ( y Float64x8 ) Float64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSB, CPU Feature: AVX
func ( x Int8x16 ) Max ( y Int8x16 ) Int8x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSB, CPU Feature: AVX2
func ( x Int8x32 ) Max ( y Int8x32 ) Int8x32
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Max ( y Int8x64 ) Int8x64
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSW, CPU Feature: AVX
func ( x Int16x8 ) Max ( y Int16x8 ) Int16x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSW, CPU Feature: AVX2
func ( x Int16x16 ) Max ( y Int16x16 ) Int16x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Max ( y Int16x32 ) Int16x32
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSD, CPU Feature: AVX
func ( x Int32x4 ) Max ( y Int32x4 ) Int32x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSD, CPU Feature: AVX2
func ( x Int32x8 ) Max ( y Int32x8 ) Int32x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Max ( y Int32x16 ) Int32x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) Max ( y Int64x2 ) Int64x2
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) Max ( y Int64x4 ) Int64x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Max ( y Int64x8 ) Int64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUB, CPU Feature: AVX
func ( x Uint8x16 ) Max ( y Uint8x16 ) Uint8x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUB, CPU Feature: AVX2
func ( x Uint8x32 ) Max ( y Uint8x32 ) Uint8x32
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Max ( y Uint8x64 ) Uint8x64
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUW, CPU Feature: AVX
func ( x Uint16x8 ) Max ( y Uint16x8 ) Uint16x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUW, CPU Feature: AVX2
func ( x Uint16x16 ) Max ( y Uint16x16 ) Uint16x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Max ( y Uint16x32 ) Uint16x32
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUD, CPU Feature: AVX
func ( x Uint32x4 ) Max ( y Uint32x4 ) Uint32x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUD, CPU Feature: AVX2
func ( x Uint32x8 ) Max ( y Uint32x8 ) Uint32x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Max ( y Uint32x16 ) Uint32x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) Max ( y Uint64x2 ) Uint64x2
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) Max ( y Uint64x4 ) Uint64x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Max computes the maximum of corresponding elements.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Max ( y Uint64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
/* MaxMasked */
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) MaxMasked ( y Float32x4 , z Mask32x4 ) Float32x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) MaxMasked ( y Float32x8 , z Mask32x8 ) Float32x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VMAXPS, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float32x16 ) MaxMasked ( y Float32x16 , z Mask32x16 ) Float32x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) MaxMasked ( y Float64x2 , z Mask64x2 ) Float64x2
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VMAXPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) MaxMasked ( y Float64x4 , z Mask64x4 ) Float64x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VMAXPD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Float64x8 ) MaxMasked ( y Float64x8 , z Mask64x8 ) Float64x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) MaxMasked ( y Int8x16 , z Mask8x16 ) Int8x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) MaxMasked ( y Int8x32 , z Mask8x32 ) Int8x32
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int8x64 ) MaxMasked ( y Int8x64 , z Mask8x64 ) Int8x64
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) MaxMasked ( y Int16x8 , z Mask16x8 ) Int16x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) MaxMasked ( y Int16x16 , z Mask16x16 ) Int16x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x32 ) MaxMasked ( y Int16x32 , z Mask16x32 ) Int16x32
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) MaxMasked ( y Int32x4 , z Mask32x4 ) Int32x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXSD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) MaxMasked ( y Int32x8 , z Mask32x8 ) Int32x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x16 ) MaxMasked ( y Int32x16 , z Mask32x16 ) Int32x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x2 ) MaxMasked ( y Int64x2 , z Mask64x2 ) Int64x2
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x4 ) MaxMasked ( y Int64x4 , z Mask64x4 ) Int64x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXSQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x8 ) MaxMasked ( y Int64x8 , z Mask64x8 ) Int64x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) MaxMasked ( y Uint8x16 , z Mask8x16 ) Uint8x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) MaxMasked ( y Uint8x32 , z Mask8x32 ) Uint8x32
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUB, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x64 ) MaxMasked ( y Uint8x64 , z Mask8x64 ) Uint8x64
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) MaxMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) MaxMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) MaxMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) MaxMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
2025-07-08 18:18:55 +00:00
// Asm: VPMAXUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) MaxMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) MaxMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) MaxMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) MaxMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
2025-06-24 15:21:29 +00:00
// Max computes the maximum of corresponding elements.
//
// Asm: VPMAXUQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) MaxMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* Min */

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX
func (x Float32x4) Min(y Float32x4) Float32x4

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX
func (x Float32x8) Min(y Float32x8) Float32x8

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX512EVEX
func (x Float32x16) Min(y Float32x16) Float32x16

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX
func (x Float64x2) Min(y Float64x2) Float64x2

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX
func (x Float64x4) Min(y Float64x4) Float64x4

// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX512EVEX
func (x Float64x8) Min(y Float64x8) Float64x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX
func (x Int8x16) Min(y Int8x16) Int8x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX2
func (x Int8x32) Min(y Int8x32) Int8x32

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX512EVEX
func (x Int8x64) Min(y Int8x64) Int8x64

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSW, CPU Feature: AVX
func (x Int16x8) Min(y Int16x8) Int16x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSW, CPU Feature: AVX2
func (x Int16x16) Min(y Int16x16) Int16x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSW, CPU Feature: AVX512EVEX
func (x Int16x32) Min(y Int16x32) Int16x32

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSD, CPU Feature: AVX
func (x Int32x4) Min(y Int32x4) Int32x4

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSD, CPU Feature: AVX2
func (x Int32x8) Min(y Int32x8) Int32x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSD, CPU Feature: AVX512EVEX
func (x Int32x16) Min(y Int32x16) Int32x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x2) Min(y Int64x2) Int64x2

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x4) Min(y Int64x4) Int64x4

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x8) Min(y Int64x8) Int64x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUB, CPU Feature: AVX
func (x Uint8x16) Min(y Uint8x16) Uint8x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUB, CPU Feature: AVX2
func (x Uint8x32) Min(y Uint8x32) Uint8x32

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUB, CPU Feature: AVX512EVEX
func (x Uint8x64) Min(y Uint8x64) Uint8x64

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUW, CPU Feature: AVX
func (x Uint16x8) Min(y Uint16x8) Uint16x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUW, CPU Feature: AVX2
func (x Uint16x16) Min(y Uint16x16) Uint16x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUW, CPU Feature: AVX512EVEX
func (x Uint16x32) Min(y Uint16x32) Uint16x32

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUD, CPU Feature: AVX
func (x Uint32x4) Min(y Uint32x4) Uint32x4

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUD, CPU Feature: AVX2
func (x Uint32x8) Min(y Uint32x8) Uint32x8

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUD, CPU Feature: AVX512EVEX
func (x Uint32x16) Min(y Uint32x16) Uint32x16

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) Min(y Uint64x2) Uint64x2

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) Min(y Uint64x4) Uint64x4

// Min computes the minimum of corresponding elements.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) Min(y Uint64x8) Uint64x8

2025-07-08 18:18:55 +00:00
/* MinMasked */
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) MinMasked ( y Float32x4 , z Mask32x4 ) Float32x4
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) MinMasked ( y Float32x8 , z Mask32x8 ) Float32x8
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) MinMasked ( y Float32x16 , z Mask32x16 ) Float32x16
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) MinMasked ( y Float64x2 , z Mask64x2 ) Float64x2
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) MinMasked ( y Float64x4 , z Mask64x4 ) Float64x4
// Min computes the minimum of corresponding elements.
//
// Asm: VMINPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) MinMasked ( y Float64x8 , z Mask64x8 ) Float64x8
// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) MinMasked ( y Int8x16 , z Mask8x16 ) Int8x16
// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) MinMasked ( y Int8x32 , z Mask8x32 ) Int8x32
// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) MinMasked ( y Int8x64 , z Mask8x64 ) Int8x64
// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) MinMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// Min computes the minimum of corresponding elements.
//
// Asm: VPMINSW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) MinMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSW, CPU Feature: AVX512EVEX
func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSD, CPU Feature: AVX512EVEX
func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSD, CPU Feature: AVX512EVEX
func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSD, CPU Feature: AVX512EVEX
func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINSQ, CPU Feature: AVX512EVEX
func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUB, CPU Feature: AVX512EVEX
func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUB, CPU Feature: AVX512EVEX
func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUB, CPU Feature: AVX512EVEX
func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUW, CPU Feature: AVX512EVEX
func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUW, CPU Feature: AVX512EVEX
func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUW, CPU Feature: AVX512EVEX
func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUD, CPU Feature: AVX512EVEX
func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUD, CPU Feature: AVX512EVEX
func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUD, CPU Feature: AVX512EVEX
func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4

// MinMasked computes the minimum of corresponding elements, masked.
//
// Asm: VPMINUQ, CPU Feature: AVX512EVEX
func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8
2025-06-24 15:21:29 +00:00
/* Mul */
// Mul multiplies corresponding elements of two vectors.
//
// Asm: VMULPS, CPU Feature: AVX
func ( x Float32x4 ) Mul ( y Float32x4 ) Float32x4
// Mul multiplies corresponding elements of two vectors.
//
// Asm: VMULPS, CPU Feature: AVX
func ( x Float32x8 ) Mul ( y Float32x8 ) Float32x8
// Mul multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Mul ( y Float32x16 ) Float32x16
// Mul multiplies corresponding elements of two vectors.
//
// Asm: VMULPD, CPU Feature: AVX
func ( x Float64x2 ) Mul ( y Float64x2 ) Float64x2
// Mul multiplies corresponding elements of two vectors.
//
// Asm: VMULPD, CPU Feature: AVX
func ( x Float64x4 ) Mul ( y Float64x4 ) Float64x4
// Mul multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Mul ( y Float64x8 ) Float64x8
/* MulByPowOf2 */

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4

// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8
2025-07-08 18:18:55 +00:00
/* MulByPowOf2Masked */
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) MulByPowOf2Masked ( y Float32x4 , z Mask32x4 ) Float32x4
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) MulByPowOf2Masked ( y Float32x8 , z Mask32x8 ) Float32x8
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) MulByPowOf2Masked ( y Float32x16 , z Mask32x16 ) Float32x16
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) MulByPowOf2Masked ( y Float64x2 , z Mask64x2 ) Float64x2
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) MulByPowOf2Masked ( y Float64x4 , z Mask64x4 ) Float64x4
// MulByPowOf2 multiplies elements by a power of 2.
//
// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) MulByPowOf2Masked ( y Float64x8 , z Mask64x8 ) Float64x8
2025-06-24 15:21:29 +00:00
/* MulEvenWiden */
// MulEvenWiden multiplies even-indexed elements, widening the result.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX
func ( x Int32x4 ) MulEvenWiden ( y Int32x4 ) Int64x2
// MulEvenWiden multiplies even-indexed elements, widening the result.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX2
func ( x Int32x8 ) MulEvenWiden ( y Int32x8 ) Int64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) MulEvenWiden ( y Int64x2 ) Int64x2
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) MulEvenWiden ( y Int64x4 ) Int64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) MulEvenWiden ( y Int64x8 ) Int64x8
// MulEvenWiden multiplies even-indexed elements, widening the result.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX
func ( x Uint32x4 ) MulEvenWiden ( y Uint32x4 ) Uint64x2
// MulEvenWiden multiplies even-indexed elements, widening the result.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX2
func ( x Uint32x8 ) MulEvenWiden ( y Uint32x8 ) Uint64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) MulEvenWiden ( y Uint64x2 ) Uint64x2
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) MulEvenWiden ( y Uint64x4 ) Uint64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) MulEvenWiden ( y Uint64x8 ) Uint64x8
2025-07-08 18:18:55 +00:00
/* MulEvenWidenMasked */
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) MulEvenWidenMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) MulEvenWidenMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) MulEvenWidenMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) MulEvenWidenMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) MulEvenWidenMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// MulEvenWiden multiplies even-indexed elements, widening the result, masked.
// Result[i] = v1.Even[i] * v2.Even[i].
//
// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) MulEvenWidenMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* MulHigh */
// MulHigh multiplies elements and stores the high part of the result.
//
// Asm: VPMULHW, CPU Feature: AVX
func ( x Int16x8 ) MulHigh ( y Int16x8 ) Int16x8
2025-07-08 18:18:55 +00:00
// MulHigh multiplies elements and stores the high part of the result.
//
// Asm: VPMULHW, CPU Feature: AVX2
func ( x Int16x16 ) MulHigh ( y Int16x16 ) Int16x16
// MulHigh multiplies elements and stores the high part of the result, masked.
//
// Asm: VPMULHW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) MulHigh ( y Int16x32 ) Int16x32
// MulHigh multiplies elements and stores the high part of the result.
//
// Asm: VPMULHUW, CPU Feature: AVX
func ( x Uint16x8 ) MulHigh ( y Uint16x8 ) Uint16x8
// MulHigh multiplies elements and stores the high part of the result.
//
// Asm: VPMULHUW, CPU Feature: AVX2
func ( x Uint16x16 ) MulHigh ( y Uint16x16 ) Uint16x16
// MulHigh multiplies elements and stores the high part of the result, masked.
//
// Asm: VPMULHUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) MulHigh ( y Uint16x32 ) Uint16x32
/* MulHighMasked */
// MulHigh multiplies elements and stores the high part of the result, masked.
//
// Asm: VPMULHW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) MulHighMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// MulHigh multiplies elements and stores the high part of the result, masked.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMULHW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) MulHighMasked ( y Int16x16 , z Mask16x16 ) Int16x16
2025-06-24 15:21:29 +00:00
// MulHigh multiplies elements and stores the high part of the result, masked.
//
// Asm: VPMULHW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x32 ) MulHighMasked ( y Int16x32 , z Mask16x32 ) Int16x32
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// MulHigh multiplies elements and stores the high part of the result, masked.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMULHUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) MulHighMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// MulHigh multiplies elements and stores the high part of the result, masked.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMULHUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) MulHighMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
2025-06-24 15:21:29 +00:00
// MulHigh multiplies elements and stores the high part of the result, masked.
//
// Asm: VPMULHUW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) MulHighMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-24 15:21:29 +00:00
/* MulLow */
// MulLow multiplies elements and stores the low part of the result.
//
// Asm: VPMULLW, CPU Feature: AVX
func ( x Int16x8 ) MulLow ( y Int16x8 ) Int16x8
// MulLow multiplies elements and stores the low part of the result.
//
// Asm: VPMULLW, CPU Feature: AVX2
func ( x Int16x16 ) MulLow ( y Int16x16 ) Int16x16
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) MulLow ( y Int16x32 ) Int16x32
// MulLow multiplies elements and stores the low part of the result.
//
// Asm: VPMULLD, CPU Feature: AVX
func ( x Int32x4 ) MulLow ( y Int32x4 ) Int32x4
// MulLow multiplies elements and stores the low part of the result.
//
// Asm: VPMULLD, CPU Feature: AVX2
func ( x Int32x8 ) MulLow ( y Int32x8 ) Int32x8
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) MulLow ( y Int32x16 ) Int32x16
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) MulLow ( y Int64x2 ) Int64x2
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) MulLow ( y Int64x4 ) Int64x4
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) MulLow ( y Int64x8 ) Int64x8
2025-07-08 18:18:55 +00:00
/* MulLowMasked */
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) MulLowMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) MulLowMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) MulLowMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) MulLowMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) MulLowMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) MulLowMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) MulLowMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) MulLowMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// MulLow multiplies elements and stores the low part of the result, masked.
//
// Asm: VPMULLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) MulLowMasked ( y Int64x8 , z Mask64x8 ) Int64x8
/* MulMasked */

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPS, CPU Feature: AVX512EVEX
func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPS, CPU Feature: AVX512EVEX
func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPS, CPU Feature: AVX512EVEX
func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPD, CPU Feature: AVX512EVEX
func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPD, CPU Feature: AVX512EVEX
func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4

// MulMasked multiplies corresponding elements of two vectors, masked.
//
// Asm: VMULPD, CPU Feature: AVX512EVEX
func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8
2025-06-24 15:21:29 +00:00
/* NotEqual */
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x4 ) NotEqual ( y Float32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX
func ( x Float32x8 ) NotEqual ( y Float32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) NotEqual ( y Float32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x2 ) NotEqual ( y Float64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX
func ( x Float64x4 ) NotEqual ( y Float64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) NotEqual ( y Float64x8 ) Mask64x8
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) NotEqual ( y Int8x16 ) Mask8x16
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) NotEqual ( y Int8x32 ) Mask8x32
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) NotEqual ( y Int8x64 ) Mask8x64
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) NotEqual ( y Int16x8 ) Mask16x8
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) NotEqual ( y Int16x16 ) Mask16x16
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) NotEqual ( y Int16x32 ) Mask16x32
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) NotEqual ( y Int32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) NotEqual ( y Int32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) NotEqual ( y Int32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) NotEqual ( y Int64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) NotEqual ( y Int64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) NotEqual ( y Int64x8 ) Mask64x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) NotEqual ( y Uint8x16 ) Mask8x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) NotEqual ( y Uint8x32 ) Mask8x32
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) NotEqual ( y Uint8x64 ) Mask8x64
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) NotEqual ( y Uint16x8 ) Mask16x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) NotEqual ( y Uint16x16 ) Mask16x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) NotEqual ( y Uint16x32 ) Mask16x32
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) NotEqual ( y Uint32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) NotEqual ( y Uint32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) NotEqual ( y Uint32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) NotEqual ( y Uint64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) NotEqual ( y Uint64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) NotEqual ( y Uint64x8 ) Mask64x8
2025-07-08 18:18:55 +00:00
/* NotEqualMasked */
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) NotEqualMasked ( y Float32x4 , z Mask32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) NotEqualMasked ( y Float32x8 , z Mask32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VCMPPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) NotEqualMasked ( y Float32x16 , z Mask32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) NotEqualMasked ( y Float64x2 , z Mask64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) NotEqualMasked ( y Float64x4 , z Mask64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VCMPPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) NotEqualMasked ( y Float64x8 , z Mask64x8 ) Mask64x8
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) NotEqualMasked ( y Int8x16 , z Mask8x16 ) Mask8x16
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) NotEqualMasked ( y Int8x32 , z Mask8x32 ) Mask8x32
// NotEqual compares for inequality.
//
// Asm: VPCMPB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) NotEqualMasked ( y Int8x64 , z Mask8x64 ) Mask8x64
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) NotEqualMasked ( y Int16x8 , z Mask16x8 ) Mask16x8
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) NotEqualMasked ( y Int16x16 , z Mask16x16 ) Mask16x16
// NotEqual compares for inequality.
//
// Asm: VPCMPW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) NotEqualMasked ( y Int16x32 , z Mask16x32 ) Mask16x32
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) NotEqualMasked ( y Int32x4 , z Mask32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) NotEqualMasked ( y Int32x8 , z Mask32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VPCMPD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) NotEqualMasked ( y Int32x16 , z Mask32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) NotEqualMasked ( y Int64x2 , z Mask64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) NotEqualMasked ( y Int64x4 , z Mask64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VPCMPQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) NotEqualMasked ( y Int64x8 , z Mask64x8 ) Mask64x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) NotEqualMasked ( y Uint8x16 , z Mask8x16 ) Mask8x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) NotEqualMasked ( y Uint8x32 , z Mask8x32 ) Mask8x32
// NotEqual compares for inequality.
//
// Asm: VPCMPUB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) NotEqualMasked ( y Uint8x64 , z Mask8x64 ) Mask8x64
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) NotEqualMasked ( y Uint16x8 , z Mask16x8 ) Mask16x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) NotEqualMasked ( y Uint16x16 , z Mask16x16 ) Mask16x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) NotEqualMasked ( y Uint16x32 , z Mask16x32 ) Mask16x32
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) NotEqualMasked ( y Uint32x4 , z Mask32x4 ) Mask32x4
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) NotEqualMasked ( y Uint32x8 , z Mask32x8 ) Mask32x8
// NotEqual compares for inequality.
//
// Asm: VPCMPUD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) NotEqualMasked ( y Uint32x16 , z Mask32x16 ) Mask32x16
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) NotEqualMasked ( y Uint64x2 , z Mask64x2 ) Mask64x2
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) NotEqualMasked ( y Uint64x4 , z Mask64x4 ) Mask64x4
// NotEqual compares for inequality.
//
// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) NotEqualMasked ( y Uint64x8 , z Mask64x8 ) Mask64x8
2025-06-24 15:21:29 +00:00
/* Or */
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Int8x16 ) Or ( y Int8x16 ) Int8x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Int8x32 ) Or ( y Int8x32 ) Int8x32
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Int16x8 ) Or ( y Int16x8 ) Int16x8
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Int16x16 ) Or ( y Int16x16 ) Int16x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Int32x4 ) Or ( y Int32x4 ) Int32x4
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Int32x8 ) Or ( y Int32x8 ) Int32x8
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Or ( y Int32x16 ) Int32x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Int64x2 ) Or ( y Int64x2 ) Int64x2
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Int64x4 ) Or ( y Int64x4 ) Int64x4
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Or ( y Int64x8 ) Int64x8
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Uint8x16 ) Or ( y Uint8x16 ) Uint8x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Uint8x32 ) Or ( y Uint8x32 ) Uint8x32
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Uint16x8 ) Or ( y Uint16x8 ) Uint16x8
2025-07-08 18:18:55 +00:00
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Uint16x16 ) Or ( y Uint16x16 ) Uint16x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Uint32x4 ) Or ( y Uint32x4 ) Uint32x4
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Uint32x8 ) Or ( y Uint32x8 ) Uint32x8
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Or ( y Uint32x16 ) Uint32x16
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX
func ( x Uint64x2 ) Or ( y Uint64x2 ) Uint64x2
// Or performs a bitwise OR operation between two vectors.
//
// Asm: VPOR, CPU Feature: AVX2
func ( x Uint64x4 ) Or ( y Uint64x4 ) Uint64x4
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Or ( y Uint64x8 ) Uint64x8
/* OrMasked */
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) OrMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) OrMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) OrMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) OrMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) OrMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// Or performs a masked bitwise OR operation between two vectors.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) OrMasked ( y Int64x8 , z Mask64x8 ) Int64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Or performs a masked bitwise OR operation between two vectors.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) OrMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Or performs a masked bitwise OR operation between two vectors.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPORD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) OrMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
2025-06-24 15:21:29 +00:00
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) OrMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Or performs a masked bitwise OR operation between two vectors.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) OrMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// Or performs a masked bitwise OR operation between two vectors.
2025-06-24 15:21:29 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPORQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) OrMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
2025-06-24 15:21:29 +00:00
// Or performs a masked bitwise OR operation between two vectors.
//
// Asm: VPORQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) OrMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* PairDotProd */
// PairDotProd multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDWD, CPU Feature: AVX
func ( x Int16x8 ) PairDotProd ( y Int16x8 ) Int32x4
// PairDotProd multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDWD, CPU Feature: AVX2
func ( x Int16x16 ) PairDotProd ( y Int16x16 ) Int32x8
// PairDotProd multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDWD, CPU Feature: AVX512EVEX
func ( x Int16x32 ) PairDotProd ( y Int16x32 ) Int32x16
/* PairDotProdAccumulate */

// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX_VNNI
func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4

// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX_VNNI
func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8

// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX512EVEX
func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16
2025-07-08 18:18:55 +00:00
/* PairDotProdAccumulateMasked */
// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) PairDotProdAccumulateMasked ( y Int16x8 , z Int16x8 , u Mask32x4 ) Int32x4
// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) PairDotProdAccumulateMasked ( y Int16x16 , z Int16x16 , u Mask32x8 ) Int32x8
// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) PairDotProdAccumulateMasked ( y Int16x32 , z Int16x32 , u Mask32x16 ) Int32x16
/* PairDotProdMasked */

// PairDotProdMasked multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size, masked.
//
// Asm: VPMADDWD, CPU Feature: AVX512EVEX
func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4

// PairDotProdMasked multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size, masked.
//
// Asm: VPMADDWD, CPU Feature: AVX512EVEX
func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8

// PairDotProdMasked multiplies the elements and add the pairs together,
// yielding a vector of half as many elements with twice the input element size, masked.
//
// Asm: VPMADDWD, CPU Feature: AVX512EVEX
func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16
2025-06-24 15:21:29 +00:00
/* PairwiseAdd */
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VHADDPS, CPU Feature: AVX
func ( x Float32x4 ) PairwiseAdd ( y Float32x4 ) Float32x4
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VHADDPS, CPU Feature: AVX
func ( x Float32x8 ) PairwiseAdd ( y Float32x8 ) Float32x8
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VHADDPD, CPU Feature: AVX
func ( x Float64x2 ) PairwiseAdd ( y Float64x2 ) Float64x2
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VHADDPD, CPU Feature: AVX
func ( x Float64x4 ) PairwiseAdd ( y Float64x4 ) Float64x4
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDW, CPU Feature: AVX
func ( x Int16x8 ) PairwiseAdd ( y Int16x8 ) Int16x8
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDW, CPU Feature: AVX2
func ( x Int16x16 ) PairwiseAdd ( y Int16x16 ) Int16x16
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDD, CPU Feature: AVX
func ( x Int32x4 ) PairwiseAdd ( y Int32x4 ) Int32x4
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDD, CPU Feature: AVX2
func ( x Int32x8 ) PairwiseAdd ( y Int32x8 ) Int32x8
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDW, CPU Feature: AVX
func ( x Uint16x8 ) PairwiseAdd ( y Uint16x8 ) Uint16x8
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDW, CPU Feature: AVX2
func ( x Uint16x16 ) PairwiseAdd ( y Uint16x16 ) Uint16x16
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDD, CPU Feature: AVX
func ( x Uint32x4 ) PairwiseAdd ( y Uint32x4 ) Uint32x4
// PairwiseAdd horizontally adds adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDD, CPU Feature: AVX2
func ( x Uint32x8 ) PairwiseAdd ( y Uint32x8 ) Uint32x8
/* PairwiseSub */

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VHSUBPS, CPU Feature: AVX
func (x Float32x4) PairwiseSub(y Float32x4) Float32x4

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VHSUBPS, CPU Feature: AVX
func (x Float32x8) PairwiseSub(y Float32x8) Float32x8

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VHSUBPD, CPU Feature: AVX
func (x Float64x2) PairwiseSub(y Float64x2) Float64x2

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VHSUBPD, CPU Feature: AVX
func (x Float64x4) PairwiseSub(y Float64x4) Float64x4

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBW, CPU Feature: AVX
func (x Int16x8) PairwiseSub(y Int16x8) Int16x8

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBW, CPU Feature: AVX2
func (x Int16x16) PairwiseSub(y Int16x16) Int16x16

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBD, CPU Feature: AVX
func (x Int32x4) PairwiseSub(y Int32x4) Int32x4

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBD, CPU Feature: AVX2
func (x Int32x8) PairwiseSub(y Int32x8) Int32x8

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBW, CPU Feature: AVX
func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBW, CPU Feature: AVX2
func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBD, CPU Feature: AVX
func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4

// PairwiseSub horizontally subtracts adjacent pairs of elements.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
//
// Asm: VPHSUBD, CPU Feature: AVX2
func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8
/* PopCount */

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Int8x16) PopCount() Int8x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Int8x32) PopCount() Int8x32

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Int8x64) PopCount() Int8x64

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Int16x8) PopCount() Int16x8

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Int16x16) PopCount() Int16x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Int16x32) PopCount() Int16x32

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Int32x4) PopCount() Int32x4

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Int32x8) PopCount() Int32x8

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Int32x16) PopCount() Int32x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Int64x2) PopCount() Int64x2

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Int64x4) PopCount() Int64x4

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Int64x8) PopCount() Int64x8

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Uint8x16) PopCount() Uint8x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Uint8x32) PopCount() Uint8x32

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func (x Uint8x64) PopCount() Uint8x64

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Uint16x8) PopCount() Uint16x8

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Uint16x16) PopCount() Uint16x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func (x Uint16x32) PopCount() Uint16x32

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Uint32x4) PopCount() Uint32x4

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Uint32x8) PopCount() Uint32x8

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func (x Uint32x16) PopCount() Uint32x16

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Uint64x2) PopCount() Uint64x2

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Uint64x4) PopCount() Uint64x4

// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func (x Uint64x8) PopCount() Uint64x8
2025-07-08 18:18:55 +00:00
/* PopCountMasked */
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) PopCountMasked ( y Mask8x16 ) Int8x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) PopCountMasked ( y Mask8x32 ) Int8x32
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) PopCountMasked ( y Mask8x64 ) Int8x64
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) PopCountMasked ( y Mask16x8 ) Int16x8
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) PopCountMasked ( y Mask16x16 ) Int16x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) PopCountMasked ( y Mask16x32 ) Int16x32
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) PopCountMasked ( y Mask32x4 ) Int32x4
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) PopCountMasked ( y Mask32x8 ) Int32x8
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) PopCountMasked ( y Mask32x16 ) Int32x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) PopCountMasked ( y Mask64x2 ) Int64x2
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) PopCountMasked ( y Mask64x4 ) Int64x4
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) PopCountMasked ( y Mask64x8 ) Int64x8
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) PopCountMasked ( y Mask8x16 ) Uint8x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) PopCountMasked ( y Mask8x32 ) Uint8x32
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) PopCountMasked ( y Mask8x64 ) Uint8x64
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) PopCountMasked ( y Mask16x8 ) Uint16x8
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) PopCountMasked ( y Mask16x16 ) Uint16x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) PopCountMasked ( y Mask16x32 ) Uint16x32
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) PopCountMasked ( y Mask32x4 ) Uint32x4
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) PopCountMasked ( y Mask32x8 ) Uint32x8
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) PopCountMasked ( y Mask32x16 ) Uint32x16
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) PopCountMasked ( y Mask64x2 ) Uint64x2
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) PopCountMasked ( y Mask64x4 ) Uint64x4
// PopCount counts the number of set bits in each element.
//
// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) PopCountMasked ( y Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* RotateAllLeft */
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x4 ) RotateAllLeft ( imm uint8 ) Int32x4
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x8 ) RotateAllLeft ( imm uint8 ) Int32x8
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x16 ) RotateAllLeft ( imm uint8 ) Int32x16
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x2 ) RotateAllLeft ( imm uint8 ) Int64x2
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x4 ) RotateAllLeft ( imm uint8 ) Int64x4
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x8 ) RotateAllLeft ( imm uint8 ) Int64x8
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint32x4 ) RotateAllLeft ( imm uint8 ) Uint32x4
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint32x8 ) RotateAllLeft ( imm uint8 ) Uint32x8
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint32x16 ) RotateAllLeft ( imm uint8 ) Uint32x16
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint64x2 ) RotateAllLeft ( imm uint8 ) Uint64x2
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint64x4 ) RotateAllLeft ( imm uint8 ) Uint64x4
2025-06-24 15:21:29 +00:00
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint64x8 ) RotateAllLeft ( imm uint8 ) Uint64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
/* RotateAllLeftMasked */
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) RotateAllLeftMasked ( imm uint8 , y Mask32x4 ) Int32x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) RotateAllLeftMasked ( imm uint8 , y Mask32x8 ) Int32x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) RotateAllLeftMasked ( imm uint8 , y Mask32x16 ) Int32x16
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) RotateAllLeftMasked ( imm uint8 , y Mask64x2 ) Int64x2
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) RotateAllLeftMasked ( imm uint8 , y Mask64x4 ) Int64x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) RotateAllLeftMasked ( imm uint8 , y Mask64x8 ) Int64x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) RotateAllLeftMasked ( imm uint8 , y Mask32x4 ) Uint32x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) RotateAllLeftMasked ( imm uint8 , y Mask32x8 ) Uint32x8
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) RotateAllLeftMasked ( imm uint8 , y Mask32x16 ) Uint32x16
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) RotateAllLeftMasked ( imm uint8 , y Mask64x2 ) Uint64x2
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) RotateAllLeftMasked ( imm uint8 , y Mask64x4 ) Uint64x4
// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate.
//
// Asm: VPROLQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) RotateAllLeftMasked ( imm uint8 , y Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* RotateAllRight */
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x4 ) RotateAllRight ( imm uint8 ) Int32x4
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x8 ) RotateAllRight ( imm uint8 ) Int32x8
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int32x16 ) RotateAllRight ( imm uint8 ) Int32x16
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x2 ) RotateAllRight ( imm uint8 ) Int64x2
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x4 ) RotateAllRight ( imm uint8 ) Int64x4
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Int64x8 ) RotateAllRight ( imm uint8 ) Int64x8
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint32x4 ) RotateAllRight ( imm uint8 ) Uint32x4
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Uint32x8 ) RotateAllRight ( imm uint8 ) Uint32x8
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) RotateAllRight ( imm uint8 ) Uint32x16
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) RotateAllRight ( imm uint8 ) Uint64x2
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) RotateAllRight ( imm uint8 ) Uint64x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) RotateAllRight ( imm uint8 ) Uint64x8
/* RotateAllRightMasked */
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) RotateAllRightMasked ( imm uint8 , y Mask32x4 ) Int32x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) RotateAllRightMasked ( imm uint8 , y Mask32x8 ) Int32x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) RotateAllRightMasked ( imm uint8 , y Mask32x16 ) Int32x16
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) RotateAllRightMasked ( imm uint8 , y Mask64x2 ) Int64x2
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) RotateAllRightMasked ( imm uint8 , y Mask64x4 ) Int64x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) RotateAllRightMasked ( imm uint8 , y Mask64x8 ) Int64x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) RotateAllRightMasked ( imm uint8 , y Mask32x4 ) Uint32x4
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) RotateAllRightMasked ( imm uint8 , y Mask32x8 ) Uint32x8
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) RotateAllRightMasked ( imm uint8 , y Mask32x16 ) Uint32x16
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) RotateAllRightMasked ( imm uint8 , y Mask64x2 ) Uint64x2
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) RotateAllRightMasked ( imm uint8 , y Mask64x4 ) Uint64x4
2025-06-24 15:21:29 +00:00
// RotateAllRight rotates each element to the right by the number of bits specified by the immediate.
//
// Asm: VPRORQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) RotateAllRightMasked ( imm uint8 , y Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* RotateLeft */
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) RotateLeft ( y Int32x4 ) Int32x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) RotateLeft ( y Int32x8 ) Int32x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) RotateLeft ( y Int32x16 ) Int32x16
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) RotateLeft ( y Int64x2 ) Int64x2
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) RotateLeft ( y Int64x4 ) Int64x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) RotateLeft ( y Int64x8 ) Int64x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) RotateLeft ( y Uint32x4 ) Uint32x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) RotateLeft ( y Uint32x8 ) Uint32x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) RotateLeft ( y Uint32x16 ) Uint32x16
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) RotateLeft ( y Uint64x2 ) Uint64x2
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) RotateLeft ( y Uint64x4 ) Uint64x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) RotateLeft ( y Uint64x8 ) Uint64x8
2025-07-08 18:18:55 +00:00
/* RotateLeftMasked */
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) RotateLeftMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) RotateLeftMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) RotateLeftMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) RotateLeftMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) RotateLeftMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) RotateLeftMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) RotateLeftMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) RotateLeftMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) RotateLeftMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) RotateLeftMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) RotateLeftMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements.
//
// Asm: VPROLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) RotateLeftMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* RotateRight */

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x4) RotateRight(y Int32x4) Int32x4

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x8) RotateRight(y Int32x8) Int32x8

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x16) RotateRight(y Int32x16) Int32x16

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x2) RotateRight(y Int64x2) Int64x2

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x4) RotateRight(y Int64x4) Int64x4

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x8) RotateRight(y Int64x8) Int64x8

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4

// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8
2025-07-08 18:18:55 +00:00
/* RotateRightMasked */

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVD, CPU Feature: AVX512EVEX
func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4

// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements, under the control of the mask z.
//
// Asm: VPRORVQ, CPU Feature: AVX512EVEX
func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8
2025-06-24 15:21:29 +00:00
/* Round */

// Round rounds elements to the nearest integer.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x4) Round() Float32x4

// Round rounds elements to the nearest integer.
//
// Asm: VROUNDPS, CPU Feature: AVX
func (x Float32x8) Round() Float32x8

// Round rounds elements to the nearest integer.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x2) Round() Float64x2

// Round rounds elements to the nearest integer.
//
// Asm: VROUNDPD, CPU Feature: AVX
func (x Float64x4) Round() Float64x4
/* RoundWithPrecision */
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) RoundWithPrecision ( imm uint8 ) Float32x4
2025-06-24 15:21:29 +00:00
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) RoundWithPrecision ( imm uint8 ) Float32x8
2025-06-24 15:21:29 +00:00
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) RoundWithPrecision ( imm uint8 ) Float32x16
2025-06-24 15:21:29 +00:00
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) RoundWithPrecision ( imm uint8 ) Float64x2
2025-06-24 15:21:29 +00:00
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) RoundWithPrecision ( imm uint8 ) Float64x4
2025-06-24 15:21:29 +00:00
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) RoundWithPrecision ( imm uint8 ) Float64x8
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
/* RoundWithPrecisionMasked */
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) RoundWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) RoundWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) RoundWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) RoundWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) RoundWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// RoundWithPrecision rounds elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) RoundWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-24 15:21:29 +00:00
/* SaturatedAdd */
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX
func ( x Int8x16 ) SaturatedAdd ( y Int8x16 ) Int8x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX2
func ( x Int8x32 ) SaturatedAdd ( y Int8x32 ) Int8x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) SaturatedAdd ( y Int8x64 ) Int8x64
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX
func ( x Int16x8 ) SaturatedAdd ( y Int16x8 ) Int16x8
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX2
func ( x Int16x16 ) SaturatedAdd ( y Int16x16 ) Int16x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) SaturatedAdd ( y Int16x32 ) Int16x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX
func ( x Uint8x16 ) SaturatedAdd ( y Uint8x16 ) Uint8x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX2
func ( x Uint8x32 ) SaturatedAdd ( y Uint8x32 ) Uint8x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) SaturatedAdd ( y Uint8x64 ) Uint8x64
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX
func ( x Uint16x8 ) SaturatedAdd ( y Uint16x8 ) Uint16x8
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX2
func ( x Uint16x16 ) SaturatedAdd ( y Uint16x16 ) Uint16x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) SaturatedAdd ( y Uint16x32 ) Uint16x32
2025-07-08 18:18:55 +00:00
/* SaturatedAddMasked */
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Int8x16 ) SaturatedAddMasked ( y Int8x16 , z Mask8x16 ) Int8x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Int8x32 ) SaturatedAddMasked ( y Int8x32 , z Mask8x32 ) Int8x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) SaturatedAddMasked ( y Int8x64 , z Mask8x64 ) Int8x64
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) SaturatedAddMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) SaturatedAddMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) SaturatedAddMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) SaturatedAddMasked ( y Uint8x16 , z Mask8x16 ) Uint8x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) SaturatedAddMasked ( y Uint8x32 , z Mask8x32 ) Uint8x32
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) SaturatedAddMasked ( y Uint8x64 , z Mask8x64 ) Uint8x64
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) SaturatedAddMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) SaturatedAddMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// SaturatedAdd adds corresponding elements of two vectors with saturation.
//
// Asm: VPADDSW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) SaturatedAddMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-24 15:21:29 +00:00
/* SaturatedPairDotProdAccumulate */
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI
func ( x Int32x4 ) SaturatedPairDotProdAccumulate ( y Int16x8 , z Int16x8 ) Int32x4
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI
func ( x Int32x8 ) SaturatedPairDotProdAccumulate ( y Int16x16 , z Int16x16 ) Int32x8
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX
func ( x Int32x16 ) SaturatedPairDotProdAccumulate ( y Int16x32 , z Int16x32 ) Int32x16
2025-07-08 18:18:55 +00:00
/* SaturatedPairDotProdAccumulateMasked */
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX
func ( x Int32x4 ) SaturatedPairDotProdAccumulateMasked ( y Int16x8 , z Int16x8 , u Mask32x4 ) Int32x4
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX
func ( x Int32x8 ) SaturatedPairDotProdAccumulateMasked ( y Int16x16 , z Int16x16 , u Mask32x8 ) Int32x8
// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x.
//
// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX
func ( x Int32x16 ) SaturatedPairDotProdAccumulateMasked ( y Int16x32 , z Int16x32 , u Mask32x16 ) Int32x16
2025-06-24 15:21:29 +00:00
/* SaturatedPairwiseAdd */
// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
//
// Asm: VPHADDSW, CPU Feature: AVX
func ( x Int16x8 ) SaturatedPairwiseAdd ( y Int16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...].
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPHADDSW, CPU Feature: AVX2
func ( x Int16x16 ) SaturatedPairwiseAdd ( y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* SaturatedPairwiseSub */
// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPHSUBSW, CPU Feature: AVX
func ( x Int16x8 ) SaturatedPairwiseSub ( y Int16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation.
// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...].
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPHSUBSW, CPU Feature: AVX2
func ( x Int16x16 ) SaturatedPairwiseSub ( y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* SaturatedSub */
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX
func ( x Int8x16 ) SaturatedSub ( y Int8x16 ) Int8x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX2
func ( x Int8x32 ) SaturatedSub ( y Int8x32 ) Int8x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) SaturatedSub ( y Int8x64 ) Int8x64
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX
func ( x Int16x8 ) SaturatedSub ( y Int16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX2
func ( x Int16x16 ) SaturatedSub ( y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) SaturatedSub ( y Int16x32 ) Int16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX
func ( x Uint8x16 ) SaturatedSub ( y Uint8x16 ) Uint8x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX2
func ( x Uint8x32 ) SaturatedSub ( y Uint8x32 ) Uint8x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) SaturatedSub ( y Uint8x64 ) Uint8x64
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX
func ( x Uint16x8 ) SaturatedSub ( y Uint16x8 ) Uint16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX2
func ( x Uint16x16 ) SaturatedSub ( y Uint16x16 ) Uint16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedSub subtracts corresponding elements of two vectors with saturation.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) SaturatedSub ( y Uint16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* SaturatedSubMasked */

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSB, CPU Feature: AVX512EVEX
func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16

// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation, under the control of the mask z.
//
// Asm: VPSUBSW, CPU Feature: AVX512EVEX
func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32
/* SaturatedUnsignedSignedPairDotProd */

// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDUBSW, CPU Feature: AVX
func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8

// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDUBSW, CPU Feature: AVX2
func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16

// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
// yielding a vector of half as many elements with twice the input element size.
//
// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32
/* SaturatedUnsignedSignedPairDotProdMasked */
2025-06-24 15:21:29 +00:00
2025-07-08 18:18:55 +00:00
// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation,
2025-06-24 15:21:29 +00:00
// yielding a vector of half as many elements with twice the input element size.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
func ( x Uint8x16 ) SaturatedUnsignedSignedPairDotProdMasked ( y Int8x16 , z Mask16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation,
2025-06-24 15:21:29 +00:00
// yielding a vector of half as many elements with twice the input element size.
2025-06-16 20:11:27 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
func ( x Uint8x32 ) SaturatedUnsignedSignedPairDotProdMasked ( y Int8x32 , z Mask16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation,
2025-06-24 15:21:29 +00:00
// yielding a vector of half as many elements with twice the input element size.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint8x64 ) SaturatedUnsignedSignedPairDotProdMasked ( y Int8x64 , z Mask16x32 ) Int16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* SaturatedUnsignedSignedQuadDotProdAccumulate */
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
func ( x Int32x4 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x16 , z Int8x16 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
func ( x Int32x8 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x32 , z Int8x32 ) Int32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func ( x Int32x16 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x64 , z Int8x64 ) Int32x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
func ( x Uint32x4 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x16 , z Int8x16 ) Uint32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
func ( x Uint32x8 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x32 , z Int8x32 ) Uint32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) SaturatedUnsignedSignedQuadDotProdAccumulate ( y Uint8x64 , z Int8x64 ) Uint32x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8

// SaturatedUnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x, under the control of the mask u.
//
// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16
2025-06-25 16:06:00 -04:00
/* Set128 */
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTF128, CPU Feature: AVX
func ( x Float32x8 ) Set128 ( imm uint8 , y Float32x4 ) Float32x8
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTF128, CPU Feature: AVX
func ( x Float64x4 ) Set128 ( imm uint8 , y Float64x2 ) Float64x4
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Int8x32 ) Set128 ( imm uint8 , y Int8x16 ) Int8x32
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Int16x16 ) Set128 ( imm uint8 , y Int16x8 ) Int16x16
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Int32x8 ) Set128 ( imm uint8 , y Int32x4 ) Int32x8
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Int64x4 ) Set128 ( imm uint8 , y Int64x2 ) Int64x4
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Uint8x32 ) Set128 ( imm uint8 , y Uint8x16 ) Uint8x32
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Uint16x16 ) Set128 ( imm uint8 , y Uint16x8 ) Uint16x16
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Uint32x8 ) Set128 ( imm uint8 , y Uint32x4 ) Uint32x8
// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector.
//
// Asm: VINSERTI128, CPU Feature: AVX2
func ( x Uint64x4 ) Set128 ( imm uint8 , y Uint64x2 ) Uint64x4
2025-06-24 15:21:29 +00:00
/* SetElem */
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRB, CPU Feature: AVX
func ( x Int8x16 ) SetElem ( imm uint8 , y int8 ) Int8x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRW, CPU Feature: AVX
func ( x Int16x8 ) SetElem ( imm uint8 , y int16 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRD, CPU Feature: AVX
func ( x Int32x4 ) SetElem ( imm uint8 , y int32 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRQ, CPU Feature: AVX
func ( x Int64x2 ) SetElem ( imm uint8 , y int64 ) Int64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRB, CPU Feature: AVX
func ( x Uint8x16 ) SetElem ( imm uint8 , y uint8 ) Uint8x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRW, CPU Feature: AVX
func ( x Uint16x8 ) SetElem ( imm uint8 , y uint16 ) Uint16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
//
// Asm: VPINSRD, CPU Feature: AVX
func ( x Uint32x4 ) SetElem ( imm uint8 , y uint32 ) Uint32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// SetElem sets a single constant-indexed element's value.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPINSRQ, CPU Feature: AVX
func ( x Uint64x2 ) SetElem ( imm uint8 , y uint64 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftAllLeft */
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLW, CPU Feature: AVX
func ( x Int16x8 ) ShiftAllLeft ( y uint64 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
//
// Asm: VPSLLW, CPU Feature: AVX2
func ( x Int16x16 ) ShiftAllLeft ( y uint64 ) Int16x16
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
//
// Asm: VPSLLD, CPU Feature: AVX
func ( x Int32x4 ) ShiftAllLeft ( y uint64 ) Int32x4
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
//
// Asm: VPSLLD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftAllLeft ( y uint64 ) Int32x8
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
//
// Asm: VPSLLQ, CPU Feature: AVX
func ( x Int64x2 ) ShiftAllLeft ( y uint64 ) Int64x2
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
//
// Asm: VPSLLQ, CPU Feature: AVX2
func ( x Int64x4 ) ShiftAllLeft ( y uint64 ) Int64x4
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllLeft ( y uint64 ) Int64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLW, CPU Feature: AVX
func ( x Uint16x8 ) ShiftAllLeft ( y uint64 ) Uint16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLW, CPU Feature: AVX2
func ( x Uint16x16 ) ShiftAllLeft ( y uint64 ) Uint16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLD, CPU Feature: AVX
func ( x Uint32x4 ) ShiftAllLeft ( y uint64 ) Uint32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLD, CPU Feature: AVX2
func ( x Uint32x8 ) ShiftAllLeft ( y uint64 ) Uint32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLQ, CPU Feature: AVX
func ( x Uint64x2 ) ShiftAllLeft ( y uint64 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLQ, CPU Feature: AVX2
func ( x Uint64x4 ) ShiftAllLeft ( y uint64 ) Uint64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllLeft ( y uint64 ) Uint64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftAllLeftAndFillUpperFrom */
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int16x32 ) Int16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int32x4 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int32x8 ) Int32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int32x16 ) Int32x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int64x2 ) Int64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int64x4 ) Int64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Int64x8 ) Int64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint16x8 ) Uint16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint16x16 ) Uint16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint32x4 ) Uint32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint32x8 ) Uint32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint32x16 ) Uint32x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint64x2 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint64x4 ) Uint64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllLeftAndFillUpperFrom ( imm uint8 , y Uint64x8 ) Uint64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* ShiftAllLeftAndFillUpperFromMasked */
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int16x8 , z Mask16x8 ) Int16x8
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int16x16 , z Mask16x16 ) Int16x16
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int16x32 , z Mask16x32 ) Int16x32
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int32x4 , z Mask32x4 ) Int32x4
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int32x8 , z Mask32x8 ) Int32x8
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int32x16 , z Mask32x16 ) Int32x16
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int64x2 , z Mask64x2 ) Int64x2
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int64x4 , z Mask64x4 ) Int64x4
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Int64x8 , z Mask64x8 ) Int64x8
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint16x8 , z Mask16x8 ) Uint16x8
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint16x16 , z Mask16x16 ) Uint16x16
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint16x32 , z Mask16x32 ) Uint16x32
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint32x4 , z Mask32x4 ) Uint32x4
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint32x8 , z Mask32x8 ) Uint32x8
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint32x16 , z Mask32x16 ) Uint32x16
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint64x2 , z Mask64x2 ) Uint64x2
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint64x4 , z Mask64x4 ) Uint64x4
// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllLeftAndFillUpperFromMasked ( imm uint8 , y Uint64x8 , z Mask64x8 ) Uint64x8
/* ShiftAllLeftMasked */

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4

// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
// The operation is applied selectively under write mask z.
//
// Asm: VPSLLQ, CPU Feature: AVX512EVEX
func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8
2025-06-24 15:21:29 +00:00
/* ShiftAllRight */
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLW, CPU Feature: AVX
func ( x Int16x8 ) ShiftAllRight ( y uint64 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLW, CPU Feature: AVX2
func ( x Int16x16 ) ShiftAllRight ( y uint64 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLD, CPU Feature: AVX
func ( x Int32x4 ) ShiftAllRight ( y uint64 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftAllRight ( y uint64 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLQ, CPU Feature: AVX
func ( x Int64x2 ) ShiftAllRight ( y uint64 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLQ, CPU Feature: AVX2
func ( x Int64x4 ) ShiftAllRight ( y uint64 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllRight ( y uint64 ) Int64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLW, CPU Feature: AVX
func ( x Uint16x8 ) ShiftAllRight ( y uint64 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLW, CPU Feature: AVX2
func ( x Uint16x16 ) ShiftAllRight ( y uint64 ) Uint16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLD, CPU Feature: AVX
func ( x Uint32x4 ) ShiftAllRight ( y uint64 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLD, CPU Feature: AVX2
func ( x Uint32x8 ) ShiftAllRight ( y uint64 ) Uint32x8
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX
func ( x Uint64x2 ) ShiftAllRight ( y uint64 ) Uint64x2
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX2
func ( x Uint64x4 ) ShiftAllRight ( y uint64 ) Uint64x4
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllRight ( y uint64 ) Uint64x8
/* ShiftAllRightAndFillUpperFrom */
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int16x8 ) Int16x8
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int16x16 ) Int16x16
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int16x32 ) Int16x32
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int32x4 ) Int32x4
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int32x8 ) Int32x8
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int32x16 ) Int32x16
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int64x2 ) Int64x2
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int64x4 ) Int64x4
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Int64x8 ) Int64x8
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint16x8 ) Uint16x8
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint16x16 ) Uint16x16
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint16x32 ) Uint16x32
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint32x4 ) Uint32x4
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint32x8 ) Uint32x8
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint64x2 ) Uint64x2
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllRightAndFillUpperFrom ( imm uint8 , y Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* ShiftAllRightAndFillUpperFromMasked */
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int16x8 , z Mask16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x16 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int16x16 , z Mask16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int16x32 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int16x32 , z Mask16x32 ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x4 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int32x4 , z Mask32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int32x8 , z Mask32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int32x16 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int32x16 , z Mask32x16 ) Int32x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x2 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int64x2 , z Mask64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x4 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int64x4 , z Mask64x4 ) Int64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Int64x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Int64x8 , z Mask64x8 ) Int64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint16x8 , z Mask16x8 ) Uint16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x16 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint16x16 , z Mask16x16 ) Uint16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x4 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint32x4 , z Mask32x4 ) Uint32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint32x8 , z Mask32x8 ) Uint32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint32x16 , z Mask32x16 ) Uint32x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint64x2 , z Mask64x2 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint64x4 , z Mask64x4 ) Uint64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) ShiftAllRightAndFillUpperFromMasked ( imm uint8 , y Uint64x8 , z Mask64x8 ) Uint64x8
/* ShiftAllRightMasked */
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllRightMasked ( y uint64 , z Mask64x2 ) Int64x2
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllRightMasked ( y uint64 , z Mask64x4 ) Int64x4
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllRightMasked ( y uint64 , z Mask64x8 ) Int64x8
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftAllRightMasked ( y uint64 , z Mask64x2 ) Uint64x2
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftAllRightMasked ( y uint64 , z Mask64x4 ) Uint64x4
// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed.
//
// Asm: VPSRLQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftAllRightMasked ( y uint64 , z Mask64x8 ) Uint64x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftAllRightSignExtended */
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAW, CPU Feature: AVX
func ( x Int16x8 ) ShiftAllRightSignExtended ( y uint64 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAW, CPU Feature: AVX2
func ( x Int16x16 ) ShiftAllRightSignExtended ( y uint64 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAD, CPU Feature: AVX
func ( x Int32x4 ) ShiftAllRightSignExtended ( y uint64 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftAllRightSignExtended ( y uint64 ) Int32x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllRightSignExtended ( y uint64 ) Int64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllRightSignExtended ( y uint64 ) Int64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllRightSignExtended ( y uint64 ) Int64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* ShiftAllRightSignExtendedMasked */
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftAllRightSignExtendedMasked ( y uint64 , z Mask64x2 ) Int64x2
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftAllRightSignExtendedMasked ( y uint64 , z Mask64x4 ) Int64x4
// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftAllRightSignExtendedMasked ( y uint64 , z Mask64x8 ) Int64x8
2025-06-24 15:21:29 +00:00
/* ShiftLeft */
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftLeft ( y Int16x8 ) Int16x8
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftLeft ( y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-16 20:11:27 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftLeft ( y Int16x32 ) Int16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX2
func ( x Int32x4 ) ShiftLeft ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftLeft ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftLeft ( y Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX2
func ( x Int64x2 ) ShiftLeft ( y Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX2
func ( x Int64x4 ) ShiftLeft ( y Int64x4 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftLeft ( y Int64x8 ) Int64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftLeft ( y Uint16x8 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftLeft ( y Uint16x16 ) Uint16x16
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftLeft ( y Uint16x32 ) Uint16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX2
func ( x Uint32x4 ) ShiftLeft ( y Uint32x4 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX2
func ( x Uint32x8 ) ShiftLeft ( y Uint32x8 ) Uint32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftLeft ( y Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX2
func ( x Uint64x2 ) ShiftLeft ( y Uint64x2 ) Uint64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX2
func ( x Uint64x4 ) ShiftLeft ( y Uint64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftLeft ( y Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftLeftAndFillUpperFrom */
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftLeftAndFillUpperFrom ( y Int16x8 , z Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftLeftAndFillUpperFrom ( y Int16x16 , z Int16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftLeftAndFillUpperFrom ( y Int16x32 , z Int16x32 ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftLeftAndFillUpperFrom ( y Int32x4 , z Int32x4 ) Int32x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftLeftAndFillUpperFrom ( y Int32x8 , z Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftLeftAndFillUpperFrom ( y Int32x16 , z Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftLeftAndFillUpperFrom ( y Int64x2 , z Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftLeftAndFillUpperFrom ( y Int64x4 , z Int64x4 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftLeftAndFillUpperFrom ( y Int64x8 , z Int64x8 ) Int64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftLeftAndFillUpperFrom ( y Uint16x8 , z Uint16x8 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftLeftAndFillUpperFrom ( y Uint16x16 , z Uint16x16 ) Uint16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftLeftAndFillUpperFrom ( y Uint16x32 , z Uint16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftLeftAndFillUpperFrom ( y Uint32x4 , z Uint32x4 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftLeftAndFillUpperFrom ( y Uint32x8 , z Uint32x8 ) Uint32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftLeftAndFillUpperFrom ( y Uint32x16 , z Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftLeftAndFillUpperFrom ( y Uint64x2 , z Uint64x2 ) Uint64x2
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftLeftAndFillUpperFrom ( y Uint64x4 , z Uint64x4 ) Uint64x4
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftLeftAndFillUpperFrom ( y Uint64x8 , z Uint64x8 ) Uint64x8
/* ShiftLeftAndFillUpperFromMasked */

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8

// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftLeftAndFillUpperFromMasked ( y Uint16x16 , z Uint16x16 , u Mask16x16 ) Uint16x16
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftLeftAndFillUpperFromMasked ( y Uint16x32 , z Uint16x32 , u Mask16x32 ) Uint16x32
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftLeftAndFillUpperFromMasked ( y Uint32x4 , z Uint32x4 , u Mask32x4 ) Uint32x4
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftLeftAndFillUpperFromMasked ( y Uint32x8 , z Uint32x8 , u Mask32x8 ) Uint32x8
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftLeftAndFillUpperFromMasked ( y Uint32x16 , z Uint32x16 , u Mask32x16 ) Uint32x16
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftLeftAndFillUpperFromMasked ( y Uint64x2 , z Uint64x2 , u Mask64x2 ) Uint64x2
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftLeftAndFillUpperFromMasked ( y Uint64x4 , z Uint64x4 , u Mask64x4 ) Uint64x4
// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x.
//
// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftLeftAndFillUpperFromMasked ( y Uint64x8 , z Uint64x8 , u Mask64x8 ) Uint64x8
/* ShiftLeftMasked */
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftLeftMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftLeftMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftLeftMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftLeftMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftLeftMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftLeftMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftLeftMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftLeftMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftLeftMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftLeftMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftLeftMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftLeftMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftLeftMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftLeftMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftLeftMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
//
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftLeftMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftLeftMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSLLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftLeftMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftRight */
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRight ( y Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRight ( y Int16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRight ( y Int16x32 ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX2
func ( x Int32x4 ) ShiftRight ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftRight ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRight ( y Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVQ, CPU Feature: AVX2
func ( x Int64x2 ) ShiftRight ( y Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVQ, CPU Feature: AVX2
func ( x Int64x4 ) ShiftRight ( y Int64x4 ) Int64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRight ( y Int64x8 ) Int64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRight ( y Uint16x8 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRight ( y Uint16x16 ) Uint16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftRight ( y Uint16x32 ) Uint16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX2
func ( x Uint32x4 ) ShiftRight ( y Uint32x4 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX2
func ( x Uint32x8 ) ShiftRight ( y Uint32x8 ) Uint32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftRight ( y Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX2
func ( x Uint64x2 ) ShiftRight ( y Uint64x2 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVQ, CPU Feature: AVX2
func ( x Uint64x4 ) ShiftRight ( y Uint64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftRight ( y Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
/* ShiftRightAndFillUpperFrom */
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRightAndFillUpperFrom ( y Int16x8 , z Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRightAndFillUpperFrom ( y Int16x16 , z Int16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRightAndFillUpperFrom ( y Int16x32 , z Int16x32 ) Int16x32
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftRightAndFillUpperFrom ( y Int32x4 , z Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftRightAndFillUpperFrom ( y Int32x8 , z Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRightAndFillUpperFrom ( y Int32x16 , z Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftRightAndFillUpperFrom ( y Int64x2 , z Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftRightAndFillUpperFrom ( y Int64x4 , z Int64x4 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRightAndFillUpperFrom ( y Int64x8 , z Int64x8 ) Int64x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRightAndFillUpperFrom ( y Uint16x8 , z Uint16x8 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRightAndFillUpperFrom ( y Uint16x16 , z Uint16x16 ) Uint16x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftRightAndFillUpperFrom ( y Uint16x32 , z Uint16x32 ) Uint16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftRightAndFillUpperFrom ( y Uint32x4 , z Uint32x4 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftRightAndFillUpperFrom ( y Uint32x8 , z Uint32x8 ) Uint32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftRightAndFillUpperFrom ( y Uint32x16 , z Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftRightAndFillUpperFrom ( y Uint64x2 , z Uint64x2 ) Uint64x2
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftRightAndFillUpperFrom ( y Uint64x4 , z Uint64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftRightAndFillUpperFrom ( y Uint64x8 , z Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* ShiftRightAndFillUpperFromMasked */
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRightAndFillUpperFromMasked ( y Int16x8 , z Int16x8 , u Mask16x8 ) Int16x8
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRightAndFillUpperFromMasked ( y Int16x16 , z Int16x16 , u Mask16x16 ) Int16x16
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRightAndFillUpperFromMasked ( y Int16x32 , z Int16x32 , u Mask16x32 ) Int16x32
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftRightAndFillUpperFromMasked ( y Int32x4 , z Int32x4 , u Mask32x4 ) Int32x4
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftRightAndFillUpperFromMasked ( y Int32x8 , z Int32x8 , u Mask32x8 ) Int32x8
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRightAndFillUpperFromMasked ( y Int32x16 , z Int32x16 , u Mask32x16 ) Int32x16
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftRightAndFillUpperFromMasked ( y Int64x2 , z Int64x2 , u Mask64x2 ) Int64x2
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftRightAndFillUpperFromMasked ( y Int64x4 , z Int64x4 , u Mask64x4 ) Int64x4
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRightAndFillUpperFromMasked ( y Int64x8 , z Int64x8 , u Mask64x8 ) Int64x8
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRightAndFillUpperFromMasked ( y Uint16x8 , z Uint16x8 , u Mask16x8 ) Uint16x8
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRightAndFillUpperFromMasked ( y Uint16x16 , z Uint16x16 , u Mask16x16 ) Uint16x16
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftRightAndFillUpperFromMasked ( y Uint16x32 , z Uint16x32 , u Mask16x32 ) Uint16x32
// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftRightAndFillUpperFromMasked ( y Uint32x4 , z Uint32x4 , u Mask32x4 ) Uint32x4
// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
// The operation is performed under mask u (NOTE(review): merge vs zeroing semantics for inactive elements is not visible from this declaration — confirm against the generator).
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftRightAndFillUpperFromMasked ( y Uint32x8 , z Uint32x8 , u Mask32x8 ) Uint32x8
// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y (only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
// The operation is performed under mask u.
//
// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftRightAndFillUpperFromMasked ( y Uint32x16 , z Uint32x16 , u Mask32x16 ) Uint32x16
// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y (only the lower 6 bits are used for these 64-bit lanes, per VPSHRDVQ), and then copies the lower bits of z to the emptied upper bits of the shifted x.
// The operation is performed under mask u.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftRightAndFillUpperFromMasked ( y Uint64x2 , z Uint64x2 , u Mask64x2 ) Uint64x2
// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y (only the lower 6 bits are used for these 64-bit lanes, per VPSHRDVQ), and then copies the lower bits of z to the emptied upper bits of the shifted x.
// The operation is performed under mask u.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftRightAndFillUpperFromMasked ( y Uint64x4 , z Uint64x4 , u Mask64x4 ) Uint64x4
// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the
// corresponding elements in y (only the lower 6 bits are used for these 64-bit lanes, per VPSHRDVQ), and then copies the lower bits of z to the emptied upper bits of the shifted x.
// The operation is performed under mask u.
//
// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftRightAndFillUpperFromMasked ( y Uint64x8 , z Uint64x8 , u Mask64x8 ) Uint64x8
/* ShiftRightMasked */
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRightMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRightMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRightMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftRightMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftRightMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRightMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftRightMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftRightMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRightMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRightMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRightMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftRightMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftRightMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftRightMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftRightMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftRightMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftRightMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements, under mask z. Emptied upper bits are zeroed.
//
// Asm: VPSRLVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftRightMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-24 15:21:29 +00:00
/* ShiftRightSignExtended */
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRightSignExtended ( y Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRightSignExtended ( y Int16x16 ) Int16x16
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRightSignExtended ( y Int16x32 ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVD, CPU Feature: AVX2
func ( x Int32x4 ) ShiftRightSignExtended ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVD, CPU Feature: AVX2
func ( x Int32x8 ) ShiftRightSignExtended ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRightSignExtended ( y Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftRightSignExtended ( y Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-12 16:42:02 +00:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftRightSignExtended ( y Int64x4 ) Int64x4
2025-06-16 20:11:27 +00:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRightSignExtended ( y Int64x8 ) Int64x8
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRightSignExtended ( y Uint16x8 ) Uint16x8
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRightSignExtended ( y Uint16x16 ) Uint16x16
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint16x32 ) ShiftRightSignExtended ( y Uint16x32 ) Uint16x32
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX2
func ( x Uint32x4 ) ShiftRightSignExtended ( y Uint32x4 ) Uint32x4
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX2
func ( x Uint32x8 ) ShiftRightSignExtended ( y Uint32x8 ) Uint32x8
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) ShiftRightSignExtended ( y Uint32x16 ) Uint32x16
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) ShiftRightSignExtended ( y Uint64x2 ) Uint64x2
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) ShiftRightSignExtended ( y Uint64x4 ) Uint64x4
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) ShiftRightSignExtended ( y Uint64x8 ) Uint64x8
/* ShiftRightSignExtendedMasked */
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x8 ) ShiftRightSignExtendedMasked ( y Int16x8 , z Mask16x8 ) Int16x8
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x16 ) ShiftRightSignExtendedMasked ( y Int16x16 , z Mask16x16 ) Int16x16
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) ShiftRightSignExtendedMasked ( y Int16x32 , z Mask16x32 ) Int16x32
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) ShiftRightSignExtendedMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) ShiftRightSignExtendedMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) ShiftRightSignExtendedMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) ShiftRightSignExtendedMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) ShiftRightSignExtendedMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) ShiftRightSignExtendedMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Uint16x8 ) ShiftRightSignExtendedMasked ( y Uint16x8 , z Mask16x8 ) Uint16x8
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Uint16x16 ) ShiftRightSignExtendedMasked ( y Uint16x16 , z Mask16x16 ) Uint16x16
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) ShiftRightSignExtendedMasked ( y Uint16x32 , z Mask16x32 ) Uint16x32
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) ShiftRightSignExtendedMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-07-08 18:18:55 +00:00
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) ShiftRightSignExtendedMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVD, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint32x16 ) ShiftRightSignExtendedMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x2 ) ShiftRightSignExtendedMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
2025-06-20 15:30:55 -04:00
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
2025-06-20 15:30:55 -04:00
//
2025-06-24 15:21:29 +00:00
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x4 ) ShiftRightSignExtendedMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
2025-06-24 15:21:29 +00:00
// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit.
//
// Asm: VPSRAVQ, CPU Feature: AVX512EVEX
2025-07-08 18:18:55 +00:00
func ( x Uint64x8 ) ShiftRightSignExtendedMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-20 15:30:55 -04:00
2025-06-16 20:11:27 +00:00
/* Sign */
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
//
// Asm: VPSIGNB, CPU Feature: AVX
func ( x Int8x16 ) Sign ( y Int8x16 ) Int8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSIGNB, CPU Feature: AVX2
func ( x Int8x32 ) Sign ( y Int8x32 ) Int8x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSIGNW, CPU Feature: AVX
func ( x Int16x8 ) Sign ( y Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSIGNW, CPU Feature: AVX2
func ( x Int16x16 ) Sign ( y Int16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSIGND, CPU Feature: AVX
func ( x Int32x4 ) Sign ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sign returns the product of the first operand with -1, 0, or 1,
// whichever constant is nearest to the value of the second operand.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSIGND, CPU Feature: AVX2
func ( x Int32x8 ) Sign ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
/* Sqrt */
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPS, CPU Feature: AVX
func ( x Float32x4 ) Sqrt ( ) Float32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPS, CPU Feature: AVX
func ( x Float32x8 ) Sqrt ( ) Float32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Sqrt ( ) Float32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPD, CPU Feature: AVX
func ( x Float64x2 ) Sqrt ( ) Float64x2
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPD, CPU Feature: AVX
func ( x Float64x4 ) Sqrt ( ) Float64x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sqrt computes the square root of each element.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSQRTPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Sqrt ( ) Float64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* SqrtMasked */
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) SqrtMasked ( y Mask32x4 ) Float32x4
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) SqrtMasked ( y Mask32x8 ) Float32x8
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) SqrtMasked ( y Mask32x16 ) Float32x16
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) SqrtMasked ( y Mask64x2 ) Float64x2
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) SqrtMasked ( y Mask64x4 ) Float64x4
// SqrtMasked computes the square root of each element, under mask y.
//
// Asm: VSQRTPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) SqrtMasked ( y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* Sub */
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPS, CPU Feature: AVX
func ( x Float32x4 ) Sub ( y Float32x4 ) Float32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPS, CPU Feature: AVX
func ( x Float32x8 ) Sub ( y Float32x8 ) Float32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) Sub ( y Float32x16 ) Float32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPD, CPU Feature: AVX
func ( x Float64x2 ) Sub ( y Float64x2 ) Float64x2
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPD, CPU Feature: AVX
func ( x Float64x4 ) Sub ( y Float64x4 ) Float64x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VSUBPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) Sub ( y Float64x8 ) Float64x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX
func ( x Int8x16 ) Sub ( y Int8x16 ) Int8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX2
func ( x Int8x32 ) Sub ( y Int8x32 ) Int8x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func ( x Int8x64 ) Sub ( y Int8x64 ) Int8x64
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX
func ( x Int16x8 ) Sub ( y Int16x8 ) Int16x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX2
func ( x Int16x16 ) Sub ( y Int16x16 ) Int16x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func ( x Int16x32 ) Sub ( y Int16x32 ) Int16x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX
func ( x Int32x4 ) Sub ( y Int32x4 ) Int32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX2
func ( x Int32x8 ) Sub ( y Int32x8 ) Int32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Sub ( y Int32x16 ) Int32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX
func ( x Int64x2 ) Sub ( y Int64x2 ) Int64x2
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX2
func ( x Int64x4 ) Sub ( y Int64x4 ) Int64x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Sub ( y Int64x8 ) Int64x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX
func ( x Uint8x16 ) Sub ( y Uint8x16 ) Uint8x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX2
func ( x Uint8x32 ) Sub ( y Uint8x32 ) Uint8x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func ( x Uint8x64 ) Sub ( y Uint8x64 ) Uint8x64
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX
func ( x Uint16x8 ) Sub ( y Uint16x8 ) Uint16x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX2
func ( x Uint16x16 ) Sub ( y Uint16x16 ) Uint16x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func ( x Uint16x32 ) Sub ( y Uint16x32 ) Uint16x32
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX
func ( x Uint32x4 ) Sub ( y Uint32x4 ) Uint32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX2
func ( x Uint32x8 ) Sub ( y Uint32x8 ) Uint32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Sub ( y Uint32x16 ) Uint32x16
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX
func ( x Uint64x2 ) Sub ( y Uint64x2 ) Uint64x2
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX2
func ( x Uint64x4 ) Sub ( y Uint64x4 ) Uint64x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Sub subtracts corresponding elements of two vectors.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Sub ( y Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* SubMasked */
// Sub subtracts corresponding elements of two vectors.
//
// Asm: VSUBPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) SubMasked ( y Float32x4 , z Mask32x4 ) Float32x4
// Sub subtracts corresponding elements of two vectors.
//
// Asm: VSUBPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) SubMasked ( y Float32x8 , z Mask32x8 ) Float32x8
// Sub subtracts corresponding elements of two vectors.
//
// Asm: VSUBPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) SubMasked ( y Float32x16 , z Mask32x16 ) Float32x16
// Sub subtracts corresponding elements of two vectors.
//
// Asm: VSUBPD, CPU Feature: AVX512EVEX
func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VSUBPD, CPU Feature: AVX512EVEX
func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VSUBPD, CPU Feature: AVX512EVEX
func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBB, CPU Feature: AVX512EVEX
func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBW, CPU Feature: AVX512EVEX
func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBD, CPU Feature: AVX512EVEX
func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4

// SubMasked subtracts corresponding elements of two vectors, under a write mask.
//
// Asm: VPSUBQ, CPU Feature: AVX512EVEX
func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8
2025-06-16 20:11:27 +00:00
/* Trunc */
// Trunc truncates elements towards zero.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x4 ) Trunc ( ) Float32x4
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Trunc truncates elements towards zero.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPS, CPU Feature: AVX
func ( x Float32x8 ) Trunc ( ) Float32x8
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Trunc truncates elements towards zero.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x2 ) Trunc ( ) Float64x2
2025-06-12 16:21:35 +00:00
2025-06-16 20:11:27 +00:00
// Trunc truncates elements towards zero.
2025-06-12 16:42:02 +00:00
//
2025-06-16 20:11:27 +00:00
// Asm: VROUNDPD, CPU Feature: AVX
func ( x Float64x4 ) Trunc ( ) Float64x4
/* TruncWithPrecision */
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x4 ) TruncWithPrecision ( imm uint8 ) Float32x4
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x8 ) TruncWithPrecision ( imm uint8 ) Float32x8
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float32x16 ) TruncWithPrecision ( imm uint8 ) Float32x16
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x2 ) TruncWithPrecision ( imm uint8 ) Float64x2
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x4 ) TruncWithPrecision ( imm uint8 ) Float64x4
2025-06-12 16:21:35 +00:00
2025-06-12 16:42:02 +00:00
// TruncWithPrecision truncates elements with specified precision.
//
2025-06-12 16:21:35 +00:00
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
2025-06-28 10:20:53 -04:00
func ( x Float64x8 ) TruncWithPrecision ( imm uint8 ) Float64x8
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* TruncWithPrecisionMasked */
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x4 ) TruncWithPrecisionMasked ( imm uint8 , y Mask32x4 ) Float32x4
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x8 ) TruncWithPrecisionMasked ( imm uint8 , y Mask32x8 ) Float32x8
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
func ( x Float32x16 ) TruncWithPrecisionMasked ( imm uint8 , y Mask32x16 ) Float32x16
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x2 ) TruncWithPrecisionMasked ( imm uint8 , y Mask64x2 ) Float64x2
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x4 ) TruncWithPrecisionMasked ( imm uint8 , y Mask64x4 ) Float64x4
// TruncWithPrecision truncates elements with specified precision.
//
// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
func ( x Float64x8 ) TruncWithPrecisionMasked ( imm uint8 , y Mask64x8 ) Float64x8
2025-06-16 20:11:27 +00:00
/* UnsignedSignedQuadDotProdAccumulate */
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX_VNNI
2025-06-16 22:53:36 +00:00
func ( x Int32x4 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x16 , z Int8x16 ) Int32x4
2025-06-16 20:11:27 +00:00
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX_VNNI
2025-06-16 22:53:36 +00:00
func ( x Int32x8 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x32 , z Int8x32 ) Int32x8
2025-06-16 20:11:27 +00:00
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
2025-06-16 22:53:36 +00:00
func ( x Int32x16 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x64 , z Int8x64 ) Int32x16
2025-06-16 20:11:27 +00:00
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX_VNNI
2025-06-16 22:53:36 +00:00
func ( x Uint32x4 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x16 , z Int8x16 ) Uint32x4
2025-06-16 20:11:27 +00:00
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX_VNNI
2025-06-16 22:53:36 +00:00
func ( x Uint32x8 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x32 , z Int8x32 ) Uint32x8
2025-06-16 20:11:27 +00:00
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
2025-06-16 22:53:36 +00:00
func ( x Uint32x16 ) UnsignedSignedQuadDotProdAccumulate ( y Uint8x64 , z Int8x64 ) Uint32x16
2025-06-16 20:11:27 +00:00
2025-07-08 18:18:55 +00:00
/* UnsignedSignedQuadDotProdAccumulateMasked */
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x16 , z Int8x16 , u Mask32x4 ) Int32x4
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x32 , z Int8x32 , u Mask32x8 ) Int32x8
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x64 , z Int8x64 , u Mask32x16 ) Int32x16
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x16 , z Int8x16 , u Mask32x4 ) Uint32x4
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x32 , z Int8x32 , u Mask32x8 ) Uint32x8
// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
//
// Asm: VPDPBUSD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) UnsignedSignedQuadDotProdAccumulateMasked ( y Uint8x64 , z Int8x64 , u Mask32x16 ) Uint32x16
2025-06-16 20:11:27 +00:00
/* Xor */
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Int8x16 ) Xor ( y Int8x16 ) Int8x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Int8x32 ) Xor ( y Int8x32 ) Int8x32
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Int16x8 ) Xor ( y Int16x8 ) Int16x8
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Int16x16 ) Xor ( y Int16x16 ) Int16x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Int32x4 ) Xor ( y Int32x4 ) Int32x4
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Int32x8 ) Xor ( y Int32x8 ) Int32x8
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) Xor ( y Int32x16 ) Int32x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Int64x2 ) Xor ( y Int64x2 ) Int64x2
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Int64x4 ) Xor ( y Int64x4 ) Int64x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) Xor ( y Int64x8 ) Int64x8
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Uint8x16 ) Xor ( y Uint8x16 ) Uint8x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Uint8x32 ) Xor ( y Uint8x32 ) Uint8x32
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Uint16x8 ) Xor ( y Uint16x8 ) Uint16x8
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Uint16x16 ) Xor ( y Uint16x16 ) Uint16x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Uint32x4 ) Xor ( y Uint32x4 ) Uint32x4
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Uint32x8 ) Xor ( y Uint32x8 ) Uint32x8
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) Xor ( y Uint32x16 ) Uint32x16
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX
func ( x Uint64x2 ) Xor ( y Uint64x2 ) Uint64x2
// Xor performs a bitwise XOR operation between two vectors.
//
// Asm: VPXOR, CPU Feature: AVX2
func ( x Uint64x4 ) Xor ( y Uint64x4 ) Uint64x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) Xor ( y Uint64x8 ) Uint64x8
2025-06-12 16:21:35 +00:00
2025-07-08 18:18:55 +00:00
/* XorMasked */
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Int32x4 ) XorMasked ( y Int32x4 , z Mask32x4 ) Int32x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Int32x8 ) XorMasked ( y Int32x8 , z Mask32x8 ) Int32x8
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Int32x16 ) XorMasked ( y Int32x16 , z Mask32x16 ) Int32x16
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Int64x2 ) XorMasked ( y Int64x2 , z Mask64x2 ) Int64x2
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Int64x4 ) XorMasked ( y Int64x4 , z Mask64x4 ) Int64x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Int64x8 ) XorMasked ( y Int64x8 , z Mask64x8 ) Int64x8
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Uint32x4 ) XorMasked ( y Uint32x4 , z Mask32x4 ) Uint32x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Uint32x8 ) XorMasked ( y Uint32x8 , z Mask32x8 ) Uint32x8
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORD, CPU Feature: AVX512EVEX
func ( x Uint32x16 ) XorMasked ( y Uint32x16 , z Mask32x16 ) Uint32x16
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Uint64x2 ) XorMasked ( y Uint64x2 , z Mask64x2 ) Uint64x2
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Uint64x4 ) XorMasked ( y Uint64x4 , z Mask64x4 ) Uint64x4
// Xor performs a masked bitwise XOR operation between two vectors.
//
// Asm: VPXORQ, CPU Feature: AVX512EVEX
func ( x Uint64x8 ) XorMasked ( y Uint64x8 , z Mask64x8 ) Uint64x8
2025-06-12 03:54:34 +00:00
// Float64x2 converts from Float32x4 to Float64x2
func ( from Float32x4 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Float32x4 to Int8x16
func ( from Float32x4 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int16x8 converts from Float32x4 to Int16x8
func ( from Float32x4 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x4 converts from Float32x4 to Int32x4
func ( from Float32x4 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x2 converts from Float32x4 to Int64x2
func ( from Float32x4 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Float32x4 to Uint8x16
func ( from Float32x4 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Float32x4 to Uint16x8
func ( from Float32x4 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x4 converts from Float32x4 to Uint32x4
func ( from Float32x4 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Float32x4 to Uint64x2
func ( from Float32x4 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Float32x8 to Float64x4
func ( from Float32x8 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Float32x8 to Int8x32
func ( from Float32x8 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Float32x8 to Int16x16
func ( from Float32x8 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x8 converts from Float32x8 to Int32x8
func ( from Float32x8 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x4 converts from Float32x8 to Int64x4
func ( from Float32x8 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Float32x8 to Uint8x32
func ( from Float32x8 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Float32x8 to Uint16x16
func ( from Float32x8 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x8 converts from Float32x8 to Uint32x8
func ( from Float32x8 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x4 converts from Float32x8 to Uint64x4
func ( from Float32x8 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x8 converts from Float32x16 to Float64x8
func ( from Float32x16 ) AsFloat64x8 ( ) ( to Float64x8 )
// Int8x64 converts from Float32x16 to Int8x64
func ( from Float32x16 ) AsInt8x64 ( ) ( to Int8x64 )
// Int16x32 converts from Float32x16 to Int16x32
func ( from Float32x16 ) AsInt16x32 ( ) ( to Int16x32 )
// Int32x16 converts from Float32x16 to Int32x16
func ( from Float32x16 ) AsInt32x16 ( ) ( to Int32x16 )
// Int64x8 converts from Float32x16 to Int64x8
func ( from Float32x16 ) AsInt64x8 ( ) ( to Int64x8 )
// Uint8x64 converts from Float32x16 to Uint8x64
func ( from Float32x16 ) AsUint8x64 ( ) ( to Uint8x64 )
// Uint16x32 converts from Float32x16 to Uint16x32
func ( from Float32x16 ) AsUint16x32 ( ) ( to Uint16x32 )
// Uint32x16 converts from Float32x16 to Uint32x16
func ( from Float32x16 ) AsUint32x16 ( ) ( to Uint32x16 )
// Uint64x8 converts from Float32x16 to Uint64x8
func ( from Float32x16 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x4 converts from Float64x2 to Float32x4
func ( from Float64x2 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Float64x2 to Int8x16
func ( from Float64x2 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int16x8 converts from Float64x2 to Int16x8
func ( from Float64x2 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x4 converts from Float64x2 to Int32x4
func ( from Float64x2 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x2 converts from Float64x2 to Int64x2
func ( from Float64x2 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Float64x2 to Uint8x16
func ( from Float64x2 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Float64x2 to Uint16x8
func ( from Float64x2 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x4 converts from Float64x2 to Uint32x4
func ( from Float64x2 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Float64x2 to Uint64x2
func ( from Float64x2 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Float64x4 to Float32x8
func ( from Float64x4 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Float64x4 to Int8x32
func ( from Float64x4 ) AsInt8x32 ( ) ( to Int8x32 )
2025-05-22 19:59:12 +00:00
// Int16x16 converts from Float64x4 to Int16x16
func ( from Float64x4 ) AsInt16x16 ( ) ( to Int16x16 )
// Int32x8 converts from Float64x4 to Int32x8
func ( from Float64x4 ) AsInt32x8 ( ) ( to Int32x8 )
// Int64x4 converts from Float64x4 to Int64x4
func ( from Float64x4 ) AsInt64x4 ( ) ( to Int64x4 )
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Float64x4 to Uint8x32
func ( from Float64x4 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Float64x4 to Uint16x16
func ( from Float64x4 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
// Uint32x8 converts from Float64x4 to Uint32x8
func ( from Float64x4 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-06-12 03:54:34 +00:00
// Uint64x4 converts from Float64x4 to Uint64x4
func ( from Float64x4 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x16 converts from Float64x8 to Float32x16
func ( from Float64x8 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Float64x8 to Int8x64
func ( from Float64x8 ) AsInt8x64 ( ) ( to Int8x64 )
2025-06-12 03:54:34 +00:00
// Int16x32 converts from Float64x8 to Int16x32
func ( from Float64x8 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x16 converts from Float64x8 to Int32x16
func ( from Float64x8 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x8 converts from Float64x8 to Int64x8
func ( from Float64x8 ) AsInt64x8 ( ) ( to Int64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x64 converts from Float64x8 to Uint8x64
func ( from Float64x8 ) AsUint8x64 ( ) ( to Uint8x64 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x32 converts from Float64x8 to Uint16x32
func ( from Float64x8 ) AsUint16x32 ( ) ( to Uint16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x16 converts from Float64x8 to Uint32x16
func ( from Float64x8 ) AsUint32x16 ( ) ( to Uint32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x8 converts from Float64x8 to Uint64x8
func ( from Float64x8 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x4 converts from Int8x16 to Float32x4
func ( from Int8x16 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x2 converts from Int8x16 to Float64x2
func ( from Int8x16 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x8 converts from Int8x16 to Int16x8
func ( from Int8x16 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x4 converts from Int8x16 to Int32x4
func ( from Int8x16 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x2 converts from Int8x16 to Int64x2
func ( from Int8x16 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Int8x16 to Uint8x16
func ( from Int8x16 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x8 converts from Int8x16 to Uint16x8
func ( from Int8x16 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x4 converts from Int8x16 to Uint32x4
func ( from Int8x16 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x2 converts from Int8x16 to Uint64x2
func ( from Int8x16 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x8 converts from Int8x32 to Float32x8
func ( from Int8x32 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x4 converts from Int8x32 to Float64x4
func ( from Int8x32 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x16 converts from Int8x32 to Int16x16
func ( from Int8x32 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x8 converts from Int8x32 to Int32x8
func ( from Int8x32 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x4 converts from Int8x32 to Int64x4
func ( from Int8x32 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Int8x32 to Uint8x32
func ( from Int8x32 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x16 converts from Int8x32 to Uint16x16
func ( from Int8x32 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x8 converts from Int8x32 to Uint32x8
func ( from Int8x32 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x4 converts from Int8x32 to Uint64x4
func ( from Int8x32 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x16 converts from Int8x64 to Float32x16
func ( from Int8x64 ) AsFloat32x16 ( ) ( to Float32x16 )
// Float64x8 converts from Int8x64 to Float64x8
func ( from Int8x64 ) AsFloat64x8 ( ) ( to Float64x8 )
// Int16x32 converts from Int8x64 to Int16x32
func ( from Int8x64 ) AsInt16x32 ( ) ( to Int16x32 )
// Int32x16 converts from Int8x64 to Int32x16
func ( from Int8x64 ) AsInt32x16 ( ) ( to Int32x16 )
// Int64x8 converts from Int8x64 to Int64x8
func ( from Int8x64 ) AsInt64x8 ( ) ( to Int64x8 )
// Uint8x64 converts from Int8x64 to Uint8x64
func ( from Int8x64 ) AsUint8x64 ( ) ( to Uint8x64 )
// Uint16x32 converts from Int8x64 to Uint16x32
func ( from Int8x64 ) AsUint16x32 ( ) ( to Uint16x32 )
// Uint32x16 converts from Int8x64 to Uint32x16
func ( from Int8x64 ) AsUint32x16 ( ) ( to Uint32x16 )
// Uint64x8 converts from Int8x64 to Uint64x8
func ( from Int8x64 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x4 converts from Int16x8 to Float32x4
func ( from Int16x8 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x2 converts from Int16x8 to Float64x2
func ( from Int16x8 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Int16x8 to Int8x16
func ( from Int16x8 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int32x4 converts from Int16x8 to Int32x4
func ( from Int16x8 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x2 converts from Int16x8 to Int64x2
func ( from Int16x8 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Int16x8 to Uint8x16
func ( from Int16x8 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Int16x8 to Uint16x8
func ( from Int16x8 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x4 converts from Int16x8 to Uint32x4
func ( from Int16x8 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Int16x8 to Uint64x2
func ( from Int16x8 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x8 converts from Int16x16 to Float32x8
func ( from Int16x16 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x4 converts from Int16x16 to Float64x4
func ( from Int16x16 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Int16x16 to Int8x32
func ( from Int16x16 ) AsInt8x32 ( ) ( to Int8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x8 converts from Int16x16 to Int32x8
func ( from Int16x16 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x4 converts from Int16x16 to Int64x4
func ( from Int16x16 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Int16x16 to Uint8x32
func ( from Int16x16 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x16 converts from Int16x16 to Uint16x16
func ( from Int16x16 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x8 converts from Int16x16 to Uint32x8
func ( from Int16x16 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x4 converts from Int16x16 to Uint64x4
func ( from Int16x16 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x16 converts from Int16x32 to Float32x16
func ( from Int16x32 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x8 converts from Int16x32 to Float64x8
func ( from Int16x32 ) AsFloat64x8 ( ) ( to Float64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Int16x32 to Int8x64
func ( from Int16x32 ) AsInt8x64 ( ) ( to Int8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x16 converts from Int16x32 to Int32x16
func ( from Int16x32 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x8 converts from Int16x32 to Int64x8
func ( from Int16x32 ) AsInt64x8 ( ) ( to Int64x8 )
// Uint8x64 converts from Int16x32 to Uint8x64
func ( from Int16x32 ) AsUint8x64 ( ) ( to Uint8x64 )
// Uint16x32 converts from Int16x32 to Uint16x32
func ( from Int16x32 ) AsUint16x32 ( ) ( to Uint16x32 )
// Uint32x16 converts from Int16x32 to Uint32x16
func ( from Int16x32 ) AsUint32x16 ( ) ( to Uint32x16 )
// Uint64x8 converts from Int16x32 to Uint64x8
func ( from Int16x32 ) AsUint64x8 ( ) ( to Uint64x8 )
// Float32x4 converts from Int32x4 to Float32x4
func ( from Int32x4 ) AsFloat32x4 ( ) ( to Float32x4 )
// Float64x2 converts from Int32x4 to Float64x2
func ( from Int32x4 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int8x16 converts from Int32x4 to Int8x16
func ( from Int32x4 ) AsInt8x16 ( ) ( to Int8x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x8 converts from Int32x4 to Int16x8
func ( from Int32x4 ) AsInt16x8 ( ) ( to Int16x8 )
// Int64x2 converts from Int32x4 to Int64x2
func ( from Int32x4 ) AsInt64x2 ( ) ( to Int64x2 )
// Uint8x16 converts from Int32x4 to Uint8x16
func ( from Int32x4 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Int32x4 to Uint16x8
func ( from Int32x4 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x4 converts from Int32x4 to Uint32x4
func ( from Int32x4 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Int32x4 to Uint64x2
func ( from Int32x4 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Int32x8 to Float32x8
func ( from Int32x8 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Int32x8 to Float64x4
func ( from Int32x8 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Int32x8 to Int8x32
func ( from Int32x8 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Int32x8 to Int16x16
func ( from Int32x8 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x4 converts from Int32x8 to Int64x4
func ( from Int32x8 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Int32x8 to Uint8x32
func ( from Int32x8 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Int32x8 to Uint16x16
func ( from Int32x8 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x8 converts from Int32x8 to Uint32x8
func ( from Int32x8 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x4 converts from Int32x8 to Uint64x4
func ( from Int32x8 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x16 converts from Int32x16 to Float32x16
func ( from Int32x16 ) AsFloat32x16 ( ) ( to Float32x16 )
// Float64x8 converts from Int32x16 to Float64x8
func ( from Int32x16 ) AsFloat64x8 ( ) ( to Float64x8 )
// Int8x64 converts from Int32x16 to Int8x64
func ( from Int32x16 ) AsInt8x64 ( ) ( to Int8x64 )
// Int16x32 converts from Int32x16 to Int16x32
func ( from Int32x16 ) AsInt16x32 ( ) ( to Int16x32 )
// Int64x8 converts from Int32x16 to Int64x8
func ( from Int32x16 ) AsInt64x8 ( ) ( to Int64x8 )
// Uint8x64 converts from Int32x16 to Uint8x64
func ( from Int32x16 ) AsUint8x64 ( ) ( to Uint8x64 )
// Uint16x32 converts from Int32x16 to Uint16x32
func ( from Int32x16 ) AsUint16x32 ( ) ( to Uint16x32 )
// Uint32x16 converts from Int32x16 to Uint32x16
func ( from Int32x16 ) AsUint32x16 ( ) ( to Uint32x16 )
// Uint64x8 converts from Int32x16 to Uint64x8
func ( from Int32x16 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x4 converts from Int64x2 to Float32x4
func ( from Int64x2 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x2 converts from Int64x2 to Float64x2
func ( from Int64x2 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Int64x2 to Int8x16
func ( from Int64x2 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int16x8 converts from Int64x2 to Int16x8
func ( from Int64x2 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x4 converts from Int64x2 to Int32x4
func ( from Int64x2 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Int64x2 to Uint8x16
func ( from Int64x2 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Int64x2 to Uint16x8
func ( from Int64x2 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x4 converts from Int64x2 to Uint32x4
func ( from Int64x2 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Int64x2 to Uint64x2
func ( from Int64x2 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Int64x4 to Float32x8
func ( from Int64x4 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Int64x4 to Float64x4
func ( from Int64x4 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Int64x4 to Int8x32
func ( from Int64x4 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Int64x4 to Int16x16
func ( from Int64x4 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x8 converts from Int64x4 to Int32x8
func ( from Int64x4 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Int64x4 to Uint8x32
func ( from Int64x4 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Int64x4 to Uint16x16
func ( from Int64x4 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x8 converts from Int64x4 to Uint32x8
func ( from Int64x4 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x4 converts from Int64x4 to Uint64x4
func ( from Int64x4 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x16 converts from Int64x8 to Float32x16
func ( from Int64x8 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x8 converts from Int64x8 to Float64x8
func ( from Int64x8 ) AsFloat64x8 ( ) ( to Float64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Int64x8 to Int8x64
func ( from Int64x8 ) AsInt8x64 ( ) ( to Int8x64 )
2025-06-12 03:54:34 +00:00
// Int16x32 converts from Int64x8 to Int16x32
func ( from Int64x8 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x16 converts from Int64x8 to Int32x16
func ( from Int64x8 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x64 converts from Int64x8 to Uint8x64
func ( from Int64x8 ) AsUint8x64 ( ) ( to Uint8x64 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x32 converts from Int64x8 to Uint16x32
func ( from Int64x8 ) AsUint16x32 ( ) ( to Uint16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x16 converts from Int64x8 to Uint32x16
func ( from Int64x8 ) AsUint32x16 ( ) ( to Uint32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x8 converts from Int64x8 to Uint64x8
func ( from Int64x8 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x4 converts from Uint8x16 to Float32x4
func ( from Uint8x16 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x2 converts from Uint8x16 to Float64x2
func ( from Uint8x16 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Uint8x16 to Int8x16
func ( from Uint8x16 ) AsInt8x16 ( ) ( to Int8x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x8 converts from Uint8x16 to Int16x8
func ( from Uint8x16 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x4 converts from Uint8x16 to Int32x4
func ( from Uint8x16 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x2 converts from Uint8x16 to Int64x2
func ( from Uint8x16 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x8 converts from Uint8x16 to Uint16x8
func ( from Uint8x16 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x4 converts from Uint8x16 to Uint32x4
func ( from Uint8x16 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x2 converts from Uint8x16 to Uint64x2
func ( from Uint8x16 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x8 converts from Uint8x32 to Float32x8
func ( from Uint8x32 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x4 converts from Uint8x32 to Float64x4
func ( from Uint8x32 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Uint8x32 to Int8x32
func ( from Uint8x32 ) AsInt8x32 ( ) ( to Int8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x16 converts from Uint8x32 to Int16x16
func ( from Uint8x32 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x8 converts from Uint8x32 to Int32x8
func ( from Uint8x32 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x4 converts from Uint8x32 to Int64x4
func ( from Uint8x32 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x16 converts from Uint8x32 to Uint16x16
func ( from Uint8x32 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x8 converts from Uint8x32 to Uint32x8
func ( from Uint8x32 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x4 converts from Uint8x32 to Uint64x4
func ( from Uint8x32 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x16 converts from Uint8x64 to Float32x16
func ( from Uint8x64 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x8 converts from Uint8x64 to Float64x8
func ( from Uint8x64 ) AsFloat64x8 ( ) ( to Float64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Uint8x64 to Int8x64
func ( from Uint8x64 ) AsInt8x64 ( ) ( to Int8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x32 converts from Uint8x64 to Int16x32
func ( from Uint8x64 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x16 converts from Uint8x64 to Int32x16
func ( from Uint8x64 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x8 converts from Uint8x64 to Int64x8
func ( from Uint8x64 ) AsInt64x8 ( ) ( to Int64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint16x32 converts from Uint8x64 to Uint16x32
func ( from Uint8x64 ) AsUint16x32 ( ) ( to Uint16x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x16 converts from Uint8x64 to Uint32x16
func ( from Uint8x64 ) AsUint32x16 ( ) ( to Uint32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x8 converts from Uint8x64 to Uint64x8
func ( from Uint8x64 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float32x4 converts from Uint16x8 to Float32x4
func ( from Uint16x8 ) AsFloat32x4 ( ) ( to Float32x4 )
// Float64x2 converts from Uint16x8 to Float64x2
func ( from Uint16x8 ) AsFloat64x2 ( ) ( to Float64x2 )
// Int8x16 converts from Uint16x8 to Int8x16
func ( from Uint16x8 ) AsInt8x16 ( ) ( to Int8x16 )
// Int16x8 converts from Uint16x8 to Int16x8
func ( from Uint16x8 ) AsInt16x8 ( ) ( to Int16x8 )
// Int32x4 converts from Uint16x8 to Int32x4
func ( from Uint16x8 ) AsInt32x4 ( ) ( to Int32x4 )
// Int64x2 converts from Uint16x8 to Int64x2
func ( from Uint16x8 ) AsInt64x2 ( ) ( to Int64x2 )
// Uint8x16 converts from Uint16x8 to Uint8x16
func ( from Uint16x8 ) AsUint8x16 ( ) ( to Uint8x16 )
// Uint32x4 converts from Uint16x8 to Uint32x4
func ( from Uint16x8 ) AsUint32x4 ( ) ( to Uint32x4 )
// Uint64x2 converts from Uint16x8 to Uint64x2
func ( from Uint16x8 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Uint16x16 to Float32x8
func ( from Uint16x16 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Uint16x16 to Float64x4
func ( from Uint16x16 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Uint16x16 to Int8x32
func ( from Uint16x16 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Uint16x16 to Int16x16
func ( from Uint16x16 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x8 converts from Uint16x16 to Int32x8
func ( from Uint16x16 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x4 converts from Uint16x16 to Int64x4
func ( from Uint16x16 ) AsInt64x4 ( ) ( to Int64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Uint16x16 to Uint8x32
func ( from Uint16x16 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x8 converts from Uint16x16 to Uint32x8
func ( from Uint16x16 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x4 converts from Uint16x16 to Uint64x4
func ( from Uint16x16 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x16 converts from Uint16x32 to Float32x16
func ( from Uint16x32 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Float64x8 converts from Uint16x32 to Float64x8
func ( from Uint16x32 ) AsFloat64x8 ( ) ( to Float64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Uint16x32 to Int8x64
func ( from Uint16x32 ) AsInt8x64 ( ) ( to Int8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int16x32 converts from Uint16x32 to Int16x32
func ( from Uint16x32 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int32x16 converts from Uint16x32 to Int32x16
func ( from Uint16x32 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int64x8 converts from Uint16x32 to Int64x8
func ( from Uint16x32 ) AsInt64x8 ( ) ( to Int64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x64 converts from Uint16x32 to Uint8x64
func ( from Uint16x32 ) AsUint8x64 ( ) ( to Uint8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint32x16 converts from Uint16x32 to Uint32x16
func ( from Uint16x32 ) AsUint32x16 ( ) ( to Uint32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint64x8 converts from Uint16x32 to Uint64x8
func ( from Uint16x32 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x4 converts from Uint32x4 to Float32x4
func ( from Uint32x4 ) AsFloat32x4 ( ) ( to Float32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x2 converts from Uint32x4 to Float64x2
func ( from Uint32x4 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Uint32x4 to Int8x16
func ( from Uint32x4 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int16x8 converts from Uint32x4 to Int16x8
func ( from Uint32x4 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x4 converts from Uint32x4 to Int32x4
func ( from Uint32x4 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x2 converts from Uint32x4 to Int64x2
func ( from Uint32x4 ) AsInt64x2 ( ) ( to Int64x2 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Uint32x4 to Uint8x16
func ( from Uint32x4 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Uint32x4 to Uint16x8
func ( from Uint32x4 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint64x2 converts from Uint32x4 to Uint64x2
func ( from Uint32x4 ) AsUint64x2 ( ) ( to Uint64x2 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Uint32x8 to Float32x8
func ( from Uint32x8 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Uint32x8 to Float64x4
func ( from Uint32x8 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Uint32x8 to Int8x32
func ( from Uint32x8 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Uint32x8 to Int16x16
func ( from Uint32x8 ) AsInt16x16 ( ) ( to Int16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x8 converts from Uint32x8 to Int32x8
func ( from Uint32x8 ) AsInt32x8 ( ) ( to Int32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x4 converts from Uint32x8 to Int64x4
func ( from Uint32x8 ) AsInt64x4 ( ) ( to Int64x4 )
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Uint32x8 to Uint8x32
func ( from Uint32x8 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Uint32x8 to Uint16x16
func ( from Uint32x8 ) AsUint16x16 ( ) ( to Uint16x16 )
// Uint64x4 converts from Uint32x8 to Uint64x4
func ( from Uint32x8 ) AsUint64x4 ( ) ( to Uint64x4 )
2025-06-17 11:57:19 -04:00
// Float32x16 converts from Uint32x16 to Float32x16
func ( from Uint32x16 ) AsFloat32x16 ( ) ( to Float32x16 )
// Float64x8 converts from Uint32x16 to Float64x8
func ( from Uint32x16 ) AsFloat64x8 ( ) ( to Float64x8 )
// Int8x64 converts from Uint32x16 to Int8x64
func ( from Uint32x16 ) AsInt8x64 ( ) ( to Int8x64 )
// Int16x32 converts from Uint32x16 to Int16x32
func ( from Uint32x16 ) AsInt16x32 ( ) ( to Int16x32 )
// Int32x16 converts from Uint32x16 to Int32x16
func ( from Uint32x16 ) AsInt32x16 ( ) ( to Int32x16 )
// Int64x8 converts from Uint32x16 to Int64x8
func ( from Uint32x16 ) AsInt64x8 ( ) ( to Int64x8 )
// Uint8x64 converts from Uint32x16 to Uint8x64
func ( from Uint32x16 ) AsUint8x64 ( ) ( to Uint8x64 )
// Uint16x32 converts from Uint32x16 to Uint16x32
func ( from Uint32x16 ) AsUint16x32 ( ) ( to Uint16x32 )
// Uint64x8 converts from Uint32x16 to Uint64x8
func ( from Uint32x16 ) AsUint64x8 ( ) ( to Uint64x8 )
2025-05-22 19:59:12 +00:00
// Float32x4 converts from Uint64x2 to Float32x4
func ( from Uint64x2 ) AsFloat32x4 ( ) ( to Float32x4 )
// Float64x2 converts from Uint64x2 to Float64x2
func ( from Uint64x2 ) AsFloat64x2 ( ) ( to Float64x2 )
2025-06-17 11:57:19 -04:00
// Int8x16 converts from Uint64x2 to Int8x16
func ( from Uint64x2 ) AsInt8x16 ( ) ( to Int8x16 )
2025-06-12 03:54:34 +00:00
// Int16x8 converts from Uint64x2 to Int16x8
func ( from Uint64x2 ) AsInt16x8 ( ) ( to Int16x8 )
// Int32x4 converts from Uint64x2 to Int32x4
func ( from Uint64x2 ) AsInt32x4 ( ) ( to Int32x4 )
2025-05-22 19:59:12 +00:00
// Int64x2 converts from Uint64x2 to Int64x2
func ( from Uint64x2 ) AsInt64x2 ( ) ( to Int64x2 )
2025-06-17 11:57:19 -04:00
// Uint8x16 converts from Uint64x2 to Uint8x16
func ( from Uint64x2 ) AsUint8x16 ( ) ( to Uint8x16 )
2025-06-12 03:54:34 +00:00
// Uint16x8 converts from Uint64x2 to Uint16x8
func ( from Uint64x2 ) AsUint16x8 ( ) ( to Uint16x8 )
2025-05-22 19:59:12 +00:00
// Uint32x4 converts from Uint64x2 to Uint32x4
func ( from Uint64x2 ) AsUint32x4 ( ) ( to Uint32x4 )
2025-06-12 03:54:34 +00:00
// Float32x8 converts from Uint64x4 to Float32x8
func ( from Uint64x4 ) AsFloat32x8 ( ) ( to Float32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x4 converts from Uint64x4 to Float64x4
func ( from Uint64x4 ) AsFloat64x4 ( ) ( to Float64x4 )
2025-06-17 11:57:19 -04:00
// Int8x32 converts from Uint64x4 to Int8x32
func ( from Uint64x4 ) AsInt8x32 ( ) ( to Int8x32 )
2025-06-12 03:54:34 +00:00
// Int16x16 converts from Uint64x4 to Int16x16
func ( from Uint64x4 ) AsInt16x16 ( ) ( to Int16x16 )
// Int32x8 converts from Uint64x4 to Int32x8
func ( from Uint64x4 ) AsInt32x8 ( ) ( to Int32x8 )
// Int64x4 converts from Uint64x4 to Int64x4
func ( from Uint64x4 ) AsInt64x4 ( ) ( to Int64x4 )
2025-06-17 11:57:19 -04:00
// Uint8x32 converts from Uint64x4 to Uint8x32
func ( from Uint64x4 ) AsUint8x32 ( ) ( to Uint8x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x16 converts from Uint64x4 to Uint16x16
func ( from Uint64x4 ) AsUint16x16 ( ) ( to Uint16x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x8 converts from Uint64x4 to Uint32x8
func ( from Uint64x4 ) AsUint32x8 ( ) ( to Uint32x8 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float32x16 converts from Uint64x8 to Float32x16
func ( from Uint64x8 ) AsFloat32x16 ( ) ( to Float32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Float64x8 converts from Uint64x8 to Float64x8
func ( from Uint64x8 ) AsFloat64x8 ( ) ( to Float64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Int8x64 converts from Uint64x8 to Int8x64
func ( from Uint64x8 ) AsInt8x64 ( ) ( to Int8x64 )
2025-06-12 03:54:34 +00:00
// Int16x32 converts from Uint64x8 to Int16x32
func ( from Uint64x8 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int32x16 converts from Uint64x8 to Int32x16
func ( from Uint64x8 ) AsInt32x16 ( ) ( to Int32x16 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Int64x8 converts from Uint64x8 to Int64x8
func ( from Uint64x8 ) AsInt64x8 ( ) ( to Int64x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// Uint8x64 converts from Uint64x8 to Uint8x64
func ( from Uint64x8 ) AsUint8x64 ( ) ( to Uint8x64 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint16x32 converts from Uint64x8 to Uint16x32
func ( from Uint64x8 ) AsUint16x32 ( ) ( to Uint16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// Uint32x16 converts from Uint64x8 to Uint32x16
func ( from Uint64x8 ) AsUint32x16 ( ) ( to Uint32x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Mask8x16 to Int8x16
func ( from Mask8x16 ) AsInt8x16 ( ) ( to Int8x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Int8x16 to Mask8x16
func ( from Int8x16 ) AsMask8x16 ( ) ( to Mask8x16 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x16 ) And ( y Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x16 ) Or ( y Mask8x16 ) Mask8x16
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Mask8x32 to Int8x32
func ( from Mask8x32 ) AsInt8x32 ( ) ( to Int8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Int8x32 to Mask8x32
func ( from Int8x32 ) AsMask8x32 ( ) ( to Mask8x32 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x32 ) And ( y Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x32 ) Or ( y Mask8x32 ) Mask8x32
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Mask8x64 to Int8x64
func ( from Mask8x64 ) AsInt8x64 ( ) ( to Int8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Int8x64 to Mask8x64
func ( from Int8x64 ) AsMask8x64 ( ) ( to Mask8x64 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x64 ) And ( y Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask8x64 ) Or ( y Mask8x64 ) Mask8x64
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Mask16x8 to Int16x8
func ( from Mask16x8 ) AsInt16x8 ( ) ( to Int16x8 )
2025-05-22 19:59:12 +00:00
2025-06-17 11:57:19 -04:00
// converts from Int16x8 to Mask16x8
func ( from Int16x8 ) AsMask16x8 ( ) ( to Mask16x8 )
2025-06-12 03:54:34 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask16x8 ) And ( y Mask16x8 ) Mask16x8
2025-06-12 03:54:34 +00:00
2025-06-17 11:57:19 -04:00
func ( x Mask16x8 ) Or ( y Mask16x8 ) Mask16x8
2025-05-22 19:59:12 +00:00
// converts from Mask16x16 to Int16x16
func ( from Mask16x16 ) AsInt16x16 ( ) ( to Int16x16 )
// converts from Int16x16 to Mask16x16
func ( from Int16x16 ) AsMask16x16 ( ) ( to Mask16x16 )
func ( x Mask16x16 ) And ( y Mask16x16 ) Mask16x16
func ( x Mask16x16 ) Or ( y Mask16x16 ) Mask16x16
2025-06-12 03:54:34 +00:00
// converts from Mask16x32 to Int16x32
func ( from Mask16x32 ) AsInt16x32 ( ) ( to Int16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
// converts from Int16x32 to Mask16x32
func ( from Int16x32 ) AsMask16x32 ( ) ( to Mask16x32 )
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
func ( x Mask16x32 ) And ( y Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
2025-06-12 03:54:34 +00:00
func ( x Mask16x32 ) Or ( y Mask16x32 ) Mask16x32
2025-05-22 19:59:12 +00:00
// converts from Mask32x4 to Int32x4
func ( from Mask32x4 ) AsInt32x4 ( ) ( to Int32x4 )
// converts from Int32x4 to Mask32x4
func ( from Int32x4 ) AsMask32x4 ( ) ( to Mask32x4 )
func ( x Mask32x4 ) And ( y Mask32x4 ) Mask32x4
func ( x Mask32x4 ) Or ( y Mask32x4 ) Mask32x4
2025-06-12 03:54:34 +00:00
// converts from Mask32x8 to Int32x8
func ( from Mask32x8 ) AsInt32x8 ( ) ( to Int32x8 )
// converts from Int32x8 to Mask32x8
func ( from Int32x8 ) AsMask32x8 ( ) ( to Mask32x8 )
func ( x Mask32x8 ) And ( y Mask32x8 ) Mask32x8
func ( x Mask32x8 ) Or ( y Mask32x8 ) Mask32x8
2025-06-17 11:57:19 -04:00
// converts from Mask32x16 to Int32x16
func ( from Mask32x16 ) AsInt32x16 ( ) ( to Int32x16 )
// converts from Int32x16 to Mask32x16
func ( from Int32x16 ) AsMask32x16 ( ) ( to Mask32x16 )
func ( x Mask32x16 ) And ( y Mask32x16 ) Mask32x16
func ( x Mask32x16 ) Or ( y Mask32x16 ) Mask32x16
2025-06-12 03:54:34 +00:00
// AsInt64x2 converts from Mask64x2 to Int64x2.
func (from Mask64x2) AsInt64x2() (to Int64x2)

// AsMask64x2 converts from Int64x2 to Mask64x2.
func (from Int64x2) AsMask64x2() (to Mask64x2)

// And returns the AND of x and y.
func (x Mask64x2) And(y Mask64x2) Mask64x2

// Or returns the OR of x and y.
func (x Mask64x2) Or(y Mask64x2) Mask64x2
// AsInt64x4 converts from Mask64x4 to Int64x4.
func (from Mask64x4) AsInt64x4() (to Int64x4)

// AsMask64x4 converts from Int64x4 to Mask64x4.
func (from Int64x4) AsMask64x4() (to Mask64x4)

// And returns the AND of x and y.
func (x Mask64x4) And(y Mask64x4) Mask64x4

// Or returns the OR of x and y.
func (x Mask64x4) Or(y Mask64x4) Mask64x4
// AsInt64x8 converts from Mask64x8 to Int64x8.
func (from Mask64x8) AsInt64x8() (to Int64x8)

// AsMask64x8 converts from Int64x8 to Mask64x8.
func (from Int64x8) AsMask64x8() (to Mask64x8)

// And returns the AND of x and y.
func (x Mask64x8) And(y Mask64x8) Mask64x8

// Or returns the OR of x and y.
func (x Mask64x8) Or(y Mask64x8) Mask64x8