Mirror of https://github.com/golang/go.git
[dev.simd] simd, cmd/compile: sample peephole optimization for .Masked()
This is not the end of such peephole optimizations; there would need to be many of these, for many simd operations.

Change-Id: I4511f6fac502bc7259c1c4414c96f56eb400c202
Reviewed-on: https://go-review.googlesource.com/c/go/+/697157
TryBot-Bypass: David Chase <drchase@google.com>
Commit-Queue: David Chase <drchase@google.com>
Reviewed-by: Junyang Shao <shaojunyang@google.com>
This commit is contained in:
parent
103b6e39ca
commit
ede64cf0d8
13 changed files with 572 additions and 3 deletions
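For intuition, the new VPANDQ512 rewrite rules below rest on a simple equivalence: ANDing a vector with a k-mask that has been expanded to all-ones/all-zeros lanes computes the same thing as a single zeroing masked move under that mask. A plain-Go model of that equivalence (a sketch only; these helper names are illustrative, not simd package API):

package main

import "fmt"

// andWithExpandedMask models (VPANDQ512 x (VPMOVMToVec64x8 k)):
// each mask bit expands to an all-ones or all-zeros 64-bit lane,
// which is then ANDed with the corresponding lane of x.
func andWithExpandedMask(x [8]uint64, k uint8) [8]uint64 {
	var out [8]uint64
	for i := range x {
		var lane uint64
		if k&(1<<i) != 0 {
			lane = ^uint64(0) // VPMOVMToVec64x8: mask bit -> all-ones lane
		}
		out[i] = x[i] & lane // VPANDQ512
	}
	return out
}

// maskedMove models (VMOVDQU64Masked512 x k) with zeroing semantics:
// selected lanes keep their value, unselected lanes become zero.
func maskedMove(x [8]uint64, k uint8) [8]uint64 {
	var out [8]uint64
	for i := range x {
		if k&(1<<i) != 0 {
			out[i] = x[i]
		}
	}
	return out
}

func main() {
	x := [8]uint64{1, 2, 3, 4, 5, 6, 7, 8}
	k := uint8(0b10100101)
	fmt.Println(andWithExpandedMask(x, k) == maskedMove(x, k)) // prints true
}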
@@ -741,7 +741,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VSQRTPSMasked512,
 		ssa.OpAMD64VSQRTPDMasked128,
 		ssa.OpAMD64VSQRTPDMasked256,
-		ssa.OpAMD64VSQRTPDMasked512:
+		ssa.OpAMD64VSQRTPDMasked512,
+		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked512:
 		p = simdVkv(s, v)
 
 	case ssa.OpAMD64VPBLENDVB128,

@@ -1672,6 +1678,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VPXORQMasked128,
 		ssa.OpAMD64VPXORQMasked256,
 		ssa.OpAMD64VPXORQMasked512,
+		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked512,
 		ssa.OpAMD64VPSLLWMasked128const,
 		ssa.OpAMD64VPSLLWMasked256const,
 		ssa.OpAMD64VPSLLWMasked512const,

@@ -1763,3 +1763,8 @@
 (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x
 (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x
 (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x
+
+(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k)
+(VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k)

@@ -1076,3 +1076,13 @@
 (blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM <types.TypeMask> mask))
 (blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM <types.TypeMask> mask))
 (blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))

@@ -140,6 +140,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 		{name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
 		{name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false},
 		{name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
 		{name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false},

@@ -928,6 +928,16 @@ func simdGenericOps() []opData {
 		{name: "blendMaskedInt16x32", argLength: 3, commutative: false},
 		{name: "blendMaskedInt32x16", argLength: 3, commutative: false},
 		{name: "blendMaskedInt64x8", argLength: 3, commutative: false},
+		{name: "moveMaskedFloat32x16", argLength: 2, commutative: false},
+		{name: "moveMaskedFloat64x8", argLength: 2, commutative: false},
+		{name: "moveMaskedInt8x64", argLength: 2, commutative: false},
+		{name: "moveMaskedInt16x32", argLength: 2, commutative: false},
+		{name: "moveMaskedInt32x16", argLength: 2, commutative: false},
+		{name: "moveMaskedInt64x8", argLength: 2, commutative: false},
+		{name: "moveMaskedUint8x64", argLength: 2, commutative: false},
+		{name: "moveMaskedUint16x32", argLength: 2, commutative: false},
+		{name: "moveMaskedUint32x16", argLength: 2, commutative: false},
+		{name: "moveMaskedUint64x8", argLength: 2, commutative: false},
 		{name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"},

@@ -1363,6 +1363,12 @@ const (
 	OpAMD64VMINPSMasked128
 	OpAMD64VMINPSMasked256
 	OpAMD64VMINPSMasked512
+	OpAMD64VMOVDQU8Masked512
+	OpAMD64VMOVDQU16Masked512
+	OpAMD64VMOVDQU32Masked512
+	OpAMD64VMOVDQU64Masked512
+	OpAMD64VMOVUPDMasked512
+	OpAMD64VMOVUPSMasked512
 	OpAMD64VMULPD128
 	OpAMD64VMULPD256
 	OpAMD64VMULPD512

@@ -5572,6 +5578,16 @@ const (
 	OpblendMaskedInt16x32
 	OpblendMaskedInt32x16
 	OpblendMaskedInt64x8
+	OpmoveMaskedFloat32x16
+	OpmoveMaskedFloat64x8
+	OpmoveMaskedInt8x64
+	OpmoveMaskedInt16x32
+	OpmoveMaskedInt32x16
+	OpmoveMaskedInt64x8
+	OpmoveMaskedUint8x64
+	OpmoveMaskedUint16x32
+	OpmoveMaskedUint32x16
+	OpmoveMaskedUint64x8
 	OpCeilScaledFloat32x4
 	OpCeilScaledFloat32x8
 	OpCeilScaledFloat32x16

@@ -20776,6 +20792,90 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VMOVDQU8Masked512",
+		argLen: 2,
+		asm:    x86.AVMOVDQU8,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU16Masked512",
+		argLen: 2,
+		asm:    x86.AVMOVDQU16,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU32Masked512",
+		argLen: 2,
+		asm:    x86.AVMOVDQU32,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:   "VMOVDQU64Masked512",
+		argLen: 2,
+		asm:    x86.AVMOVDQU64,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:   "VMOVUPDMasked512",
+		argLen: 2,
+		asm:    x86.AVMOVUPD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
+	{
+		name:   "VMOVUPSMasked512",
+		argLen: 2,
+		asm:    x86.AVMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 2147418112},        // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+			},
+		},
+	},
 	{
 		name:   "VMULPD128",
 		argLen: 2,

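A note on the large integers in these regInfo entries: they are register bitmasks, and the trailing comments name the registers each bit selects. As a quick sanity check (a sketch; the bit positions are read off the printed masks and comments above, not from any exported API), 2147418112 is 0x7FFF<<16, fifteen consecutive bits matching X0-X14, and 71494644084506624 is 0x7F<<49, seven consecutive bits matching K1-K7:

package main

import "fmt"

func main() {
	const xMask uint64 = 2147418112        // vector operand/result: X0..X14
	const kMask uint64 = 71494644084506624 // mask operand: K1..K7
	// Each set bit selects one register in the compiler's internal
	// AMD64 register numbering, per the comments in the table above.
	fmt.Printf("xMask = %#x, 0x7FFF<<16 = %#x\n", xMask, uint64(0x7FFF)<<16)
	fmt.Printf("kMask = %#x, 0x7F<<49   = %#x\n", kMask, uint64(0x7F)<<49)
}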
@@ -67992,6 +68092,56 @@ var opcodeTable = [...]opInfo{
 		argLen:  3,
 		generic: true,
 	},
+	{
+		name:    "moveMaskedFloat32x16",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedFloat64x8",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedInt8x64",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedInt16x32",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedInt32x16",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedInt64x8",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedUint8x64",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedUint16x32",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedUint32x16",
+		argLen:  2,
+		generic: true,
+	},
+	{
+		name:    "moveMaskedUint64x8",
+		argLen:  2,
+		generic: true,
+	},
 	{
 		name:    "CeilScaledFloat32x4",
 		auxType: auxUInt8,

@@ -507,6 +507,8 @@ func rewriteValueAMD64(v *Value) bool {
 		return rewriteValueAMD64_OpAMD64TESTW(v)
 	case OpAMD64TESTWconst:
 		return rewriteValueAMD64_OpAMD64TESTWconst(v)
+	case OpAMD64VPANDQ512:
+		return rewriteValueAMD64_OpAMD64VPANDQ512(v)
 	case OpAMD64VPMOVVec16x16ToM:
 		return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v)
 	case OpAMD64VPMOVVec16x32ToM:

@@ -4255,6 +4257,26 @@ func rewriteValueAMD64(v *Value) bool {
 		return rewriteValueAMD64_OpblendMaskedInt64x8(v)
 	case OpblendMaskedInt8x64:
 		return rewriteValueAMD64_OpblendMaskedInt8x64(v)
+	case OpmoveMaskedFloat32x16:
+		return rewriteValueAMD64_OpmoveMaskedFloat32x16(v)
+	case OpmoveMaskedFloat64x8:
+		return rewriteValueAMD64_OpmoveMaskedFloat64x8(v)
+	case OpmoveMaskedInt16x32:
+		return rewriteValueAMD64_OpmoveMaskedInt16x32(v)
+	case OpmoveMaskedInt32x16:
+		return rewriteValueAMD64_OpmoveMaskedInt32x16(v)
+	case OpmoveMaskedInt64x8:
+		return rewriteValueAMD64_OpmoveMaskedInt64x8(v)
+	case OpmoveMaskedInt8x64:
+		return rewriteValueAMD64_OpmoveMaskedInt8x64(v)
+	case OpmoveMaskedUint16x32:
+		return rewriteValueAMD64_OpmoveMaskedUint16x32(v)
+	case OpmoveMaskedUint32x16:
+		return rewriteValueAMD64_OpmoveMaskedUint32x16(v)
+	case OpmoveMaskedUint64x8:
+		return rewriteValueAMD64_OpmoveMaskedUint64x8(v)
+	case OpmoveMaskedUint8x64:
+		return rewriteValueAMD64_OpmoveMaskedUint8x64(v)
 	}
 	return false
 }

@@ -25949,6 +25971,71 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
 	}
 	return false
 }
+func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (VPANDQ512 x (VPMOVMToVec64x8 k))
+	// result: (VMOVDQU64Masked512 x k)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if v_1.Op != OpAMD64VPMOVMToVec64x8 {
+				continue
+			}
+			k := v_1.Args[0]
+			v.reset(OpAMD64VMOVDQU64Masked512)
+			v.AddArg2(x, k)
+			return true
+		}
+		break
+	}
+	// match: (VPANDQ512 x (VPMOVMToVec32x16 k))
+	// result: (VMOVDQU32Masked512 x k)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if v_1.Op != OpAMD64VPMOVMToVec32x16 {
+				continue
+			}
+			k := v_1.Args[0]
+			v.reset(OpAMD64VMOVDQU32Masked512)
+			v.AddArg2(x, k)
+			return true
+		}
+		break
+	}
+	// match: (VPANDQ512 x (VPMOVMToVec16x32 k))
+	// result: (VMOVDQU16Masked512 x k)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if v_1.Op != OpAMD64VPMOVMToVec16x32 {
+				continue
+			}
+			k := v_1.Args[0]
+			v.reset(OpAMD64VMOVDQU16Masked512)
+			v.AddArg2(x, k)
+			return true
+		}
+		break
+	}
+	// match: (VPANDQ512 x (VPMOVMToVec8x64 k))
+	// result: (VMOVDQU8Masked512 x k)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			x := v_0
+			if v_1.Op != OpAMD64VPMOVMToVec8x64 {
+				continue
+			}
+			k := v_1.Args[0]
+			v.reset(OpAMD64VMOVDQU8Masked512)
+			v.AddArg2(x, k)
+			return true
+		}
+		break
+	}
+	return false
+}
 func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x))

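One detail worth noting in the generated matcher above: VPANDQ512 is commutative, so each match block tries both operand orders. The `for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0` loop swaps v_0 and v_1 on its second iteration. A standalone sketch of the same idiom (illustrative values only):

package main

import "fmt"

func main() {
	// Generated-rewriter idiom for commutative ops: attempt the match
	// with (a, b), then swap and attempt it with (b, a).
	a, b := "x", "VPMOVMToVec64x8 k"
	for i := 0; i <= 1; i, a, b = i+1, b, a {
		fmt.Printf("attempt %d: args = (%s, %s)\n", i, a, b)
	}
}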
@@ -39220,6 +39307,166 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool {
 		return true
 	}
 }
+func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedFloat32x16 x mask)
+	// result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVUPSMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedFloat64x8 x mask)
+	// result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVUPDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedInt16x32 x mask)
+	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU16Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedInt32x16 x mask)
+	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU32Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedInt64x8 x mask)
+	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU64Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedInt8x64 x mask)
+	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU8Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedUint16x32 x mask)
+	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU16Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedUint32x16 x mask)
+	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU32Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedUint64x8 x mask)
+	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU64Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (moveMaskedUint8x64 x mask)
+	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
+	for {
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VMOVDQU8Masked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
+		return true
+	}
+}
 func rewriteBlockAMD64(b *Block) bool {
 	typ := &b.Func.Config.Types
 	switch b.Kind {

@@ -1070,6 +1070,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...
 	addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64)
+	addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)
 	addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64)

|
||||||
seen[asm] = struct{}{}
|
seen[asm] = struct{}{}
|
||||||
caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm)
|
caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm)
|
||||||
if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn {
|
if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn {
|
||||||
if gOp.Zeroing == nil {
|
if gOp.Zeroing == nil || *gOp.Zeroing {
|
||||||
ZeroingMask = append(ZeroingMask, caseStr)
|
ZeroingMask = append(ZeroingMask, caseStr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
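The writeSIMDSSA change above adjusts the zeroing test from `gOp.Zeroing == nil` to `gOp.Zeroing == nil || *gOp.Zeroing`: a nil pointer means "unspecified, defaults to zeroing", while the new YAML definitions further down set `zeroing: true` explicitly and must also land in ZeroingMask. A minimal sketch of this tri-state *bool pattern (the helper name is hypothetical):

package main

import "fmt"

// isZeroing mirrors the fixed condition: unset (nil) defaults to true,
// and an explicit value is honored either way.
func isZeroing(zeroing *bool) bool {
	return zeroing == nil || *zeroing
}

func main() {
	t, f := true, false
	fmt.Println(isZeroing(nil)) // true: unspecified defaults to zeroing
	fmt.Println(isZeroing(&t))  // true: explicit "zeroing: true"
	fmt.Println(isZeroing(&f))  // false: explicit "zeroing: false"
}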
@@ -129,7 +129,7 @@ func (o *Operation) VectorWidth() int {
 
 func machineOpName(maskType maskShape, gOp Operation) string {
 	asm := gOp.Asm
-	if maskType == 2 {
+	if maskType == OneMask {
 		asm += "Masked"
 	}
 	asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth())

@@ -50,6 +50,11 @@
   documentation: !string |-
     // NAME blends two vectors based on mask values, choosing either
     // the first or the second based on whether the third is false or true
+- go: move
+  commutative: false
+  documentation: !string |-
+    // NAME blends a vector with zero, with the original value where the mask is true
+    // and zero where the mask is false.
 - go: Expand
   commutative: false
   documentation: !string |-

@@ -284,6 +284,38 @@
   out:
   - *v
 
+# For AVX512
+- go: move
+  asm: VMOVDQU(8|16|32|64)
+  zeroing: true
+  in:
+  - &v
+    go: $t
+    bits: 512
+    class: vreg
+    base: int|uint
+  inVariant:
+  -
+    class: mask
+  out:
+  - *v
+
+# For AVX512
+- go: move
+  asm: VMOVUP[SD]
+  zeroing: true
+  in:
+  - &v
+    go: $t
+    bits: 512
+    class: vreg
+    base: float
+  inVariant:
+  -
+    class: mask
+  out:
+  - *v
+
 - go: Expand
   asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]"
   in:

@@ -6122,6 +6122,88 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16
 // Asm: VPBLENDMQ, CPU Feature: AVX512
 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8
 
+/* moveMasked */
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVUPS, CPU Feature: AVX512
+func (x Float32x16) moveMasked(mask Mask32x16) Float32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVUPD, CPU Feature: AVX512
+func (x Float64x8) moveMasked(mask Mask64x8) Float64x8
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+func (x Int8x64) moveMasked(mask Mask8x64) Int8x64
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+func (x Int16x32) moveMasked(mask Mask16x32) Int16x32
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+func (x Int32x16) moveMasked(mask Mask32x16) Int32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+func (x Int64x8) moveMasked(mask Mask64x8) Int64x8
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU8, CPU Feature: AVX512
+func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU16, CPU Feature: AVX512
+func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU32, CPU Feature: AVX512
+func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16
+
+// moveMasked blends a vector with zero, with the original value where the mask is true
+// and zero where the mask is false.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VMOVDQU64, CPU Feature: AVX512
+func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8
+
 // Float64x2 converts from Float32x4 to Float64x2
 func (from Float32x4) AsFloat64x2() (to Float64x2)