Mirror of https://github.com/golang/go.git
[dev.simd] simd, cmd/compile: remove move from API

These should really be machine ops only.

Change-Id: Idcc611719eff068153d88c5162dd2e0883e5e0ca
Reviewed-on: https://go-review.googlesource.com/c/go/+/717821
Reviewed-by: David Chase <drchase@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Parent: bf77323efa
Commit: 972732b245

12 changed files with 232 additions and 367 deletions
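Every portable method removed by this change had the same shape; one representative stub, exactly as it appears among the deletions further down this page (the masked-move machine ops such as VMOVUPSMasked512 remain, but only as compiler-internal lowering targets):

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVUPS, CPU Feature: AVX512
func (x Float32x16) moveMasked(mask Mask32x16) Float32x16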
@@ -914,11 +914,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VSQRTPDMasked128,
 		ssa.OpAMD64VSQRTPDMasked256,
 		ssa.OpAMD64VSQRTPDMasked512,
+		ssa.OpAMD64VMOVUPSMasked128,
+		ssa.OpAMD64VMOVUPSMasked256,
 		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked128,
+		ssa.OpAMD64VMOVUPDMasked256,
 		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked128,
+		ssa.OpAMD64VMOVDQU8Masked256,
 		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked128,
+		ssa.OpAMD64VMOVDQU16Masked256,
 		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked128,
+		ssa.OpAMD64VMOVDQU32Masked256,
 		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked128,
+		ssa.OpAMD64VMOVDQU64Masked256,
 		ssa.OpAMD64VMOVDQU64Masked512:
 		p = simdVkv(s, v)
@@ -2541,11 +2553,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 		ssa.OpAMD64VPXORQMasked128,
 		ssa.OpAMD64VPXORQMasked256,
 		ssa.OpAMD64VPXORQMasked512,
+		ssa.OpAMD64VMOVUPSMasked128,
+		ssa.OpAMD64VMOVUPSMasked256,
 		ssa.OpAMD64VMOVUPSMasked512,
+		ssa.OpAMD64VMOVUPDMasked128,
+		ssa.OpAMD64VMOVUPDMasked256,
 		ssa.OpAMD64VMOVUPDMasked512,
+		ssa.OpAMD64VMOVDQU8Masked128,
+		ssa.OpAMD64VMOVDQU8Masked256,
 		ssa.OpAMD64VMOVDQU8Masked512,
+		ssa.OpAMD64VMOVDQU16Masked128,
+		ssa.OpAMD64VMOVDQU16Masked256,
 		ssa.OpAMD64VMOVDQU16Masked512,
+		ssa.OpAMD64VMOVDQU32Masked128,
+		ssa.OpAMD64VMOVDQU32Masked256,
 		ssa.OpAMD64VMOVDQU32Masked512,
+		ssa.OpAMD64VMOVDQU64Masked128,
+		ssa.OpAMD64VMOVDQU64Masked256,
 		ssa.OpAMD64VMOVDQU64Masked512,
 		ssa.OpAMD64VPSLLWMasked128const,
 		ssa.OpAMD64VPSLLWMasked256const,
@@ -1324,16 +1324,6 @@
 (concatSelectedConstantGroupedUint32x16 ...) => (VSHUFPS512 ...)
 (concatSelectedConstantGroupedUint64x4 ...) => (VSHUFPD256 ...)
 (concatSelectedConstantGroupedUint64x8 ...) => (VSHUFPD512 ...)
-(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
-(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
-(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
-(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
-(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
 (ternInt32x4 ...) => (VPTERNLOGD128 ...)
 (ternInt32x8 ...) => (VPTERNLOGD256 ...)
 (ternInt32x16 ...) => (VPTERNLOGD512 ...)
@@ -155,11 +155,23 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 		{name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false},
 		{name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU8Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU8Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU16Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU16Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU32Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU32Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVDQU64Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVDQU64Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPDMasked128", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVUPDMasked256", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false},
+		{name: "VMOVUPSMasked128", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec128", resultInArg0: false},
+		{name: "VMOVUPSMasked256", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec256", resultInArg0: false},
 		{name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false},
 		{name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false},
 		{name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false},
@@ -1109,16 +1109,6 @@ func simdGenericOps() []opData {
 		{name: "blendMaskedInt16x32", argLength: 3, commutative: false},
 		{name: "blendMaskedInt32x16", argLength: 3, commutative: false},
 		{name: "blendMaskedInt64x8", argLength: 3, commutative: false},
-		{name: "moveMaskedFloat32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedFloat64x8", argLength: 2, commutative: false},
-		{name: "moveMaskedInt8x64", argLength: 2, commutative: false},
-		{name: "moveMaskedInt16x32", argLength: 2, commutative: false},
-		{name: "moveMaskedInt32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedInt64x8", argLength: 2, commutative: false},
-		{name: "moveMaskedUint8x64", argLength: 2, commutative: false},
-		{name: "moveMaskedUint16x32", argLength: 2, commutative: false},
-		{name: "moveMaskedUint32x16", argLength: 2, commutative: false},
-		{name: "moveMaskedUint64x8", argLength: 2, commutative: false},
 		{name: "AESRoundKeyGenAssistUint32x4", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"},
 		{name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"},
@@ -1395,11 +1395,23 @@ const (
 	OpAMD64VMINPSMasked128
 	OpAMD64VMINPSMasked256
 	OpAMD64VMINPSMasked512
+	OpAMD64VMOVDQU8Masked128
+	OpAMD64VMOVDQU8Masked256
 	OpAMD64VMOVDQU8Masked512
+	OpAMD64VMOVDQU16Masked128
+	OpAMD64VMOVDQU16Masked256
 	OpAMD64VMOVDQU16Masked512
+	OpAMD64VMOVDQU32Masked128
+	OpAMD64VMOVDQU32Masked256
 	OpAMD64VMOVDQU32Masked512
+	OpAMD64VMOVDQU64Masked128
+	OpAMD64VMOVDQU64Masked256
 	OpAMD64VMOVDQU64Masked512
+	OpAMD64VMOVUPDMasked128
+	OpAMD64VMOVUPDMasked256
 	OpAMD64VMOVUPDMasked512
+	OpAMD64VMOVUPSMasked128
+	OpAMD64VMOVUPSMasked256
 	OpAMD64VMOVUPSMasked512
 	OpAMD64VMULPD128
 	OpAMD64VMULPD256
@@ -6508,16 +6520,6 @@ const (
 	OpblendMaskedInt16x32
 	OpblendMaskedInt32x16
 	OpblendMaskedInt64x8
-	OpmoveMaskedFloat32x16
-	OpmoveMaskedFloat64x8
-	OpmoveMaskedInt8x64
-	OpmoveMaskedInt16x32
-	OpmoveMaskedInt32x16
-	OpmoveMaskedInt64x8
-	OpmoveMaskedUint8x64
-	OpmoveMaskedUint16x32
-	OpmoveMaskedUint32x16
-	OpmoveMaskedUint64x8
 	OpAESRoundKeyGenAssistUint32x4
 	OpCeilScaledFloat32x4
 	OpCeilScaledFloat32x8
@@ -22218,6 +22220,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVDQU8Masked128",
+		argLen: 2,
+		asm: x86.AVMOVDQU8,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVDQU8Masked256",
+		argLen: 2,
+		asm: x86.AVMOVDQU8,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVDQU8Masked512",
 		argLen: 2,
@@ -22232,6 +22262,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVDQU16Masked128",
+		argLen: 2,
+		asm: x86.AVMOVDQU16,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVDQU16Masked256",
+		argLen: 2,
+		asm: x86.AVMOVDQU16,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVDQU16Masked512",
 		argLen: 2,
@@ -22246,6 +22304,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVDQU32Masked128",
+		argLen: 2,
+		asm: x86.AVMOVDQU32,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVDQU32Masked256",
+		argLen: 2,
+		asm: x86.AVMOVDQU32,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVDQU32Masked512",
 		argLen: 2,
@@ -22260,6 +22346,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVDQU64Masked128",
+		argLen: 2,
+		asm: x86.AVMOVDQU64,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVDQU64Masked256",
+		argLen: 2,
+		asm: x86.AVMOVDQU64,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVDQU64Masked512",
 		argLen: 2,
@@ -22274,6 +22388,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVUPDMasked128",
+		argLen: 2,
+		asm: x86.AVMOVUPD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVUPDMasked256",
+		argLen: 2,
+		asm: x86.AVMOVUPD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVUPDMasked512",
 		argLen: 2,
@@ -22288,6 +22430,34 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name: "VMOVUPSMasked128",
+		argLen: 2,
+		asm: x86.AVMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
+	{
+		name: "VMOVUPSMasked256",
+		argLen: 2,
+		asm: x86.AVMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31
+			},
+		},
+	},
 	{
 		name: "VMOVUPSMasked512",
 		argLen: 2,
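The large integers in these regInfo entries are register bitmasks: bit i set means SSA register number i may be used. A minimal standalone sketch that reproduces all three constants, assuming only the bit numbering implied by the table's own comments (X0 at bit 16, K1 at bit 49); illustrative, not compiler code:

package main

import "fmt"

func main() {
	// Vector inputs: X0..X31 occupy bits 16..47.
	var xIn uint64
	for i := 0; i < 32; i++ {
		xIn |= 1 << (16 + i)
	}
	// Vector outputs: the same set minus X15 (bit 31), per the comments above.
	xOut := xIn &^ (1 << 31)
	// Mask inputs: K1..K7 occupy bits 49..55 (K0 is absent from the lists).
	var k uint64
	for i := 1; i <= 7; i++ {
		k |= 1 << (48 + i)
	}
	fmt.Println(xIn)  // 281474976645120
	fmt.Println(xOut) // 281472829161472
	fmt.Println(k)    // 71494644084506624
}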
@@ -82110,56 +82280,6 @@ var opcodeTable = [...]opInfo{
 		argLen: 3,
 		generic: true,
 	},
-	{
-		name: "moveMaskedFloat32x16",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedFloat64x8",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedInt8x64",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedInt16x32",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedInt32x16",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedInt64x8",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedUint8x64",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedUint16x32",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedUint32x16",
-		argLen: 2,
-		generic: true,
-	},
-	{
-		name: "moveMaskedUint64x8",
-		argLen: 2,
-		generic: true,
-	},
 	{
 		name: "AESRoundKeyGenAssistUint32x4",
 		auxType: auxUInt8,
@@ -6095,26 +6095,6 @@ func rewriteValueAMD64(v *Value) bool {
 	case OpconcatSelectedConstantUint64x2:
 		v.Op = OpAMD64VSHUFPD128
 		return true
-	case OpmoveMaskedFloat32x16:
-		return rewriteValueAMD64_OpmoveMaskedFloat32x16(v)
-	case OpmoveMaskedFloat64x8:
-		return rewriteValueAMD64_OpmoveMaskedFloat64x8(v)
-	case OpmoveMaskedInt16x32:
-		return rewriteValueAMD64_OpmoveMaskedInt16x32(v)
-	case OpmoveMaskedInt32x16:
-		return rewriteValueAMD64_OpmoveMaskedInt32x16(v)
-	case OpmoveMaskedInt64x8:
-		return rewriteValueAMD64_OpmoveMaskedInt64x8(v)
-	case OpmoveMaskedInt8x64:
-		return rewriteValueAMD64_OpmoveMaskedInt8x64(v)
-	case OpmoveMaskedUint16x32:
-		return rewriteValueAMD64_OpmoveMaskedUint16x32(v)
-	case OpmoveMaskedUint32x16:
-		return rewriteValueAMD64_OpmoveMaskedUint32x16(v)
-	case OpmoveMaskedUint64x8:
-		return rewriteValueAMD64_OpmoveMaskedUint64x8(v)
-	case OpmoveMaskedUint8x64:
-		return rewriteValueAMD64_OpmoveMaskedUint8x64(v)
 	case OpternInt32x16:
 		v.Op = OpAMD64VPTERNLOGD512
 		return true
@@ -60638,166 +60618,6 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool {
 		return true
 	}
 }
-func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedFloat32x16 x mask)
-	// result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVUPSMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedFloat64x8 x mask)
-	// result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVUPDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt16x32 x mask)
-	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU16Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt32x16 x mask)
-	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU32Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt64x8 x mask)
-	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU64Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedInt8x64 x mask)
-	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU8Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint16x32 x mask)
-	// result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU16Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint32x16 x mask)
-	// result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU32Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint64x8 x mask)
-	// result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU64Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (moveMaskedUint8x64 x mask)
-	// result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VMOVDQU8Masked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
 func rewriteBlockAMD64(b *Block) bool {
 	typ := &b.Func.Config.Types
 	switch b.Kind {
@@ -1300,16 +1300,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies .
 	addF(simdPackage, "Uint32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64)
 	addF(simdPackage, "Uint64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, 0), sys.AMD64)
 	addF(simdPackage, "Uint64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, 0), sys.AMD64)
-	addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64)
-	addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64)
 	addF(simdPackage, "Int32x4.tern", opLen3Imm8(ssa.OpternInt32x4, types.TypeVec128, 0), sys.AMD64)
 	addF(simdPackage, "Int32x8.tern", opLen3Imm8(ssa.OpternInt32x8, types.TypeVec256, 0), sys.AMD64)
 	addF(simdPackage, "Int32x16.tern", opLen3Imm8(ssa.OpternInt32x16, types.TypeVec512, 0), sys.AMD64)
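The deleted addF calls above are what exposed each moveMasked method as a compiler intrinsic mapped to its generic SSA op. As a self-contained analogy of what a two-argument builder like opLen2 plausibly does (opLen2's real definition is not part of this diff, so the types and names below are assumptions, not the compiler's actual code):

package main

import "fmt"

// Simplified stand-ins for the compiler's richer ssa.Op / intrinsicBuilder types.
type op string
type value struct {
	op   op
	args []value
}
type builder func(args []value) value

// opLen2Sketch returns a builder that emits one value of the given op from
// exactly two arguments, which is all the deleted registrations needed.
func opLen2Sketch(o op) builder {
	return func(args []value) value {
		return value{op: o, args: args[:2]}
	}
}

func main() {
	b := opLen2Sketch("moveMaskedFloat32x16")
	v := b([]value{{op: "x"}, {op: "mask"}})
	fmt.Println(v.op, len(v.args)) // moveMaskedFloat32x16 2
}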
@@ -613,7 +613,11 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer)
 		}
 	}
 	if i == 0 || op.Go != ops[i-1].Go {
-		fmt.Fprintf(f, "\n/* %s */\n", op.Go)
+		if unicode.IsUpper([]rune(op.Go)[0]) {
+			fmt.Fprintf(f, "\n/* %s */\n", op.Go)
+		} else {
+			fmt.Fprintf(fI, "\n/* %s */\n", op.Go)
+		}
 	}
 	if unicode.IsUpper([]rune(op.Go)[0]) {
 		if err := t.ExecuteTemplate(f, s, op); err != nil {
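The new branch routes each operation group's comment header by case: exported (upper-case) names still go to f, the public stubs buffer, while unexported names such as moveMasked now go to fI. A tiny self-contained illustration of the predicate the generator applies to op.Go:

package main

import (
	"fmt"
	"unicode"
)

// exported reports whether an identifier starts with an upper-case rune,
// the same test used in the hunk above.
func exported(name string) bool {
	return unicode.IsUpper([]rune(name)[0])
}

func main() {
	fmt.Println(exported("Xor"))        // true: header written to f
	fmt.Println(exported("moveMasked")) // false: header written to fI
}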
@@ -52,9 +52,8 @@
    // the first or the second based on whether the third is false or true
- go: move
  commutative: false
  documentation: !string |-
    // NAME blends a vector with zero, with the original value where the mask is true
    // and zero where the mask is false.
  noTypes: "true"
  noGenericOps: "true"
- go: Expand
  commutative: false
  documentation: !string |-
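The NAME token in the documentation string is a template placeholder: comparing this YAML with the generated stubs later on this page shows it replaced by the concrete method name. A one-line model of that substitution (a plain textual replace is an assumption about the generator's mechanism):

package main

import (
	"fmt"
	"strings"
)

func main() {
	tmpl := "// NAME blends a vector with zero, with the original value where the mask is true"
	fmt.Println(strings.ReplaceAll(tmpl, "NAME", "moveMasked"))
}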
@@ -291,7 +291,6 @@
in:
  - &v
    go: $t
    bits: 512
    class: vreg
    base: int|uint
inVariant:
@@ -307,7 +306,6 @@
in:
  - &v
    go: $t
    bits: 512
    class: vreg
    base: float
inVariant:
@@ -7606,18 +7606,6 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4
// Asm: VPXORQ, CPU Feature: AVX512
func (x Uint64x8) Xor(y Uint64x8) Uint64x8

/* blend */

/* blendMasked */

/* concatSelectedConstant */

/* concatSelectedConstantGrouped */

/* moveMasked */

/* tern */

// Float64x2 converts from Float32x4 to Float64x2
func (from Float32x4) AsFloat64x2() (to Float64x2)
@@ -4,6 +4,8 @@

package simd

/* blend */

// blend blends two vectors based on mask values, choosing either
// the first or the second based on whether the third is false or true
//
@@ -16,6 +18,8 @@ func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16
// Asm: VPBLENDVB, CPU Feature: AVX2
func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32

/* blendMasked */

// blendMasked blends two vectors based on mask values, choosing either
// the first or the second based on whether the third is false or true
//
@@ -48,6 +52,8 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16
// Asm: VPBLENDMQ, CPU Feature: AVX512
func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8

/* concatSelectedConstant */

// concatSelectedConstant concatenates selected elements from x and y into the lower and upper
// halves of the output. The selection is chosen by the constant parameter h1h0l1l0,
// where each {h,l}{1,0} is two bits specifying which element from y or x to select.
@@ -117,6 +123,8 @@ func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4
// Asm: VSHUFPD, CPU Feature: AVX
func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2

/* concatSelectedConstantGrouped */

// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y
// into the lower and upper halves of corresponding subvectors of the output.
// The selection is chosen by the constant parameter h1h0l1l0
@@ -330,85 +338,7 @@ func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x
// Asm: VSHUFPD, CPU Feature: AVX512
func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVUPS, CPU Feature: AVX512
func (x Float32x16) moveMasked(mask Mask32x16) Float32x16

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVUPD, CPU Feature: AVX512
func (x Float64x8) moveMasked(mask Mask64x8) Float64x8

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU8, CPU Feature: AVX512
func (x Int8x64) moveMasked(mask Mask8x64) Int8x64

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU16, CPU Feature: AVX512
func (x Int16x32) moveMasked(mask Mask16x32) Int16x32

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU32, CPU Feature: AVX512
func (x Int32x16) moveMasked(mask Mask32x16) Int32x16

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU64, CPU Feature: AVX512
func (x Int64x8) moveMasked(mask Mask64x8) Int64x8

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU8, CPU Feature: AVX512
func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU16, CPU Feature: AVX512
func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU32, CPU Feature: AVX512
func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16

// moveMasked blends a vector with zero, with the original value where the mask is true
// and zero where the mask is false.
//
// This operation is applied selectively under a write mask.
//
// Asm: VMOVDQU64, CPU Feature: AVX512
func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8

/* tern */

// tern performs a logical operation on three vectors based on the 8-bit truth table.
// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z))
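To make the deleted semantics concrete: per the doc comments above, moveMasked kept each lane where the mask is true and zeroed it elsewhere. A scalar model of that behavior (illustrative only; the real operation compiled to a single masked vector move):

package main

import "fmt"

// moveMaskedModel mirrors the documented semantics lane by lane:
// out[i] = x[i] where mask[i] is true, and 0 where it is false.
func moveMaskedModel(x []float32, mask []bool) []float32 {
	out := make([]float32, len(x))
	for i := range x {
		if mask[i] {
			out[i] = x[i]
		}
	}
	return out
}

func main() {
	fmt.Println(moveMaskedModel(
		[]float32{1, 2, 3, 4},
		[]bool{true, false, true, false})) // [1 0 3 0]
}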