[dev.simd] simd, cmd/compile: add float -> float conversions
This should mark the end of the conversion table, except for float16, which does not exist in Go yet.

The rounding behavior documented for the float64 -> float32 conversions follows the abi-internal default MXCSR setting:

| RC | 14/13 | 0 (RN) | Round to nearest |

Change-Id: I27a86560e8d74d20f21350bf78314b4eada20ec0
Reviewed-on: https://go-review.googlesource.com/c/go/+/724440
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
Commit: f3a306527c
Parent: d6564ed088
11 changed files with 881 additions and 3 deletions
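The rounding note above concerns the narrowing direction (float64 -> float32). As a point of reference, Go's ordinary scalar conversion uses the same round-to-nearest (ties-to-even) behavior that VCVTPD2PS has under the default MXCSR, so a plain Go program can illustrate what each lane does. This example is illustrative only and is not part of the change:

```go
package main

import "fmt"

func main() {
	// 2^24+1 is representable in float64 but falls exactly between two
	// float32 values; round-to-nearest (ties to even) picks 2^24.
	x := float64(1<<24) + 1          // 16777217
	fmt.Printf("%.0f\n", float32(x)) // 16777216

	// The next tie goes the other way, to the even significand above.
	fmt.Printf("%.0f\n", float32(16777219.0)) // 16777220
}
```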
@@ -42,6 +42,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VPBROADCASTW512,
 	ssa.OpAMD64VPBROADCASTD512,
 	ssa.OpAMD64VPBROADCASTQ512,
+	ssa.OpAMD64VCVTPD2PSX128,
+	ssa.OpAMD64VCVTPD2PSY128,
+	ssa.OpAMD64VCVTPD2PS256,
 	ssa.OpAMD64VCVTDQ2PS128,
 	ssa.OpAMD64VCVTDQ2PS256,
 	ssa.OpAMD64VCVTDQ2PS512,
@@ -54,6 +57,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSX128,
 	ssa.OpAMD64VCVTUQQ2PSY128,
 	ssa.OpAMD64VCVTUQQ2PS256,
+	ssa.OpAMD64VCVTPS2PD256,
+	ssa.OpAMD64VCVTPS2PD512,
 	ssa.OpAMD64VCVTDQ2PD256,
 	ssa.OpAMD64VCVTDQ2PD512,
 	ssa.OpAMD64VCVTQQ2PD128,
@@ -854,6 +859,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VPCOMPRESSQMasked128,
 	ssa.OpAMD64VPCOMPRESSQMasked256,
 	ssa.OpAMD64VPCOMPRESSQMasked512,
+	ssa.OpAMD64VCVTPD2PSXMasked128,
+	ssa.OpAMD64VCVTPD2PSYMasked128,
+	ssa.OpAMD64VCVTPD2PSMasked256,
 	ssa.OpAMD64VCVTDQ2PSMasked128,
 	ssa.OpAMD64VCVTDQ2PSMasked256,
 	ssa.OpAMD64VCVTDQ2PSMasked512,
@@ -866,6 +874,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSXMasked128,
 	ssa.OpAMD64VCVTUQQ2PSYMasked128,
 	ssa.OpAMD64VCVTUQQ2PSMasked256,
+	ssa.OpAMD64VCVTPS2PDMasked256,
+	ssa.OpAMD64VCVTPS2PDMasked512,
 	ssa.OpAMD64VCVTDQ2PDMasked256,
 	ssa.OpAMD64VCVTDQ2PDMasked512,
 	ssa.OpAMD64VCVTQQ2PDMasked128,
@@ -1764,6 +1774,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VPABSQMasked128load,
 	ssa.OpAMD64VPABSQMasked256load,
 	ssa.OpAMD64VPABSQMasked512load,
+	ssa.OpAMD64VCVTPD2PSXMasked128load,
+	ssa.OpAMD64VCVTPD2PSYMasked128load,
+	ssa.OpAMD64VCVTPD2PSMasked256load,
 	ssa.OpAMD64VCVTDQ2PSMasked128load,
 	ssa.OpAMD64VCVTDQ2PSMasked256load,
 	ssa.OpAMD64VCVTDQ2PSMasked512load,
@@ -1776,6 +1789,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSXMasked128load,
 	ssa.OpAMD64VCVTUQQ2PSYMasked128load,
 	ssa.OpAMD64VCVTUQQ2PSMasked256load,
+	ssa.OpAMD64VCVTPS2PDMasked256load,
+	ssa.OpAMD64VCVTPS2PDMasked512load,
 	ssa.OpAMD64VCVTDQ2PDMasked256load,
 	ssa.OpAMD64VCVTDQ2PDMasked512load,
 	ssa.OpAMD64VCVTQQ2PDMasked128load,
@@ -2190,6 +2205,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VPABSQ128load,
 	ssa.OpAMD64VPABSQ256load,
 	ssa.OpAMD64VPABSQ512load,
+	ssa.OpAMD64VCVTPD2PS256load,
 	ssa.OpAMD64VCVTDQ2PS512load,
 	ssa.OpAMD64VCVTQQ2PSX128load,
 	ssa.OpAMD64VCVTQQ2PSY128load,
@@ -2200,6 +2216,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSX128load,
 	ssa.OpAMD64VCVTUQQ2PSY128load,
 	ssa.OpAMD64VCVTUQQ2PS256load,
+	ssa.OpAMD64VCVTPS2PD512load,
 	ssa.OpAMD64VCVTDQ2PD512load,
 	ssa.OpAMD64VCVTQQ2PD128load,
 	ssa.OpAMD64VCVTQQ2PD256load,
@@ -2473,6 +2490,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VREDUCEPDMasked128Merging,
 	ssa.OpAMD64VREDUCEPDMasked256Merging,
 	ssa.OpAMD64VREDUCEPDMasked512Merging,
+	ssa.OpAMD64VCVTPD2PSXMasked128Merging,
+	ssa.OpAMD64VCVTPD2PSYMasked128Merging,
+	ssa.OpAMD64VCVTPD2PSMasked256Merging,
 	ssa.OpAMD64VCVTDQ2PSMasked128Merging,
 	ssa.OpAMD64VCVTDQ2PSMasked256Merging,
 	ssa.OpAMD64VCVTDQ2PSMasked512Merging,
@@ -2485,6 +2505,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSXMasked128Merging,
 	ssa.OpAMD64VCVTUQQ2PSYMasked128Merging,
 	ssa.OpAMD64VCVTUQQ2PSMasked256Merging,
+	ssa.OpAMD64VCVTPS2PDMasked256Merging,
+	ssa.OpAMD64VCVTPS2PDMasked512Merging,
 	ssa.OpAMD64VCVTDQ2PDMasked256Merging,
 	ssa.OpAMD64VCVTDQ2PDMasked512Merging,
 	ssa.OpAMD64VCVTQQ2PDMasked128Merging,
@@ -2880,6 +2902,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VPALIGNRMasked256,
 	ssa.OpAMD64VPALIGNRMasked512,
 	ssa.OpAMD64VPALIGNRMasked128,
+	ssa.OpAMD64VCVTPD2PSXMasked128,
+	ssa.OpAMD64VCVTPD2PSXMasked128load,
+	ssa.OpAMD64VCVTPD2PSYMasked128,
+	ssa.OpAMD64VCVTPD2PSYMasked128load,
+	ssa.OpAMD64VCVTPD2PSMasked256,
+	ssa.OpAMD64VCVTPD2PSMasked256load,
 	ssa.OpAMD64VCVTDQ2PSMasked128,
 	ssa.OpAMD64VCVTDQ2PSMasked128load,
 	ssa.OpAMD64VCVTDQ2PSMasked256,
@@ -2904,6 +2932,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool {
 	ssa.OpAMD64VCVTUQQ2PSYMasked128load,
 	ssa.OpAMD64VCVTUQQ2PSMasked256,
 	ssa.OpAMD64VCVTUQQ2PSMasked256load,
+	ssa.OpAMD64VCVTPS2PDMasked256,
+	ssa.OpAMD64VCVTPS2PDMasked256load,
+	ssa.OpAMD64VCVTPS2PDMasked512,
+	ssa.OpAMD64VCVTPS2PDMasked512load,
 	ssa.OpAMD64VCVTDQ2PDMasked256,
 	ssa.OpAMD64VCVTDQ2PDMasked256load,
 	ssa.OpAMD64VCVTDQ2PDMasked512,
@@ -249,6 +249,9 @@
 (ConcatShiftBytesRightUint8x16 ...) => (VPALIGNR128 ...)
 (ConcatShiftBytesRightGroupedUint8x32 ...) => (VPALIGNR256 ...)
 (ConcatShiftBytesRightGroupedUint8x64 ...) => (VPALIGNR512 ...)
+(ConvertToFloat32Float64x2 ...) => (VCVTPD2PSX128 ...)
+(ConvertToFloat32Float64x4 ...) => (VCVTPD2PSY128 ...)
+(ConvertToFloat32Float64x8 ...) => (VCVTPD2PS256 ...)
 (ConvertToFloat32Int32x4 ...) => (VCVTDQ2PS128 ...)
 (ConvertToFloat32Int32x8 ...) => (VCVTDQ2PS256 ...)
 (ConvertToFloat32Int32x16 ...) => (VCVTDQ2PS512 ...)
@@ -261,6 +264,8 @@
 (ConvertToFloat32Uint64x2 ...) => (VCVTUQQ2PSX128 ...)
 (ConvertToFloat32Uint64x4 ...) => (VCVTUQQ2PSY128 ...)
 (ConvertToFloat32Uint64x8 ...) => (VCVTUQQ2PS256 ...)
+(ConvertToFloat64Float32x4 ...) => (VCVTPS2PD256 ...)
+(ConvertToFloat64Float32x8 ...) => (VCVTPS2PD512 ...)
 (ConvertToFloat64Int32x4 ...) => (VCVTDQ2PD256 ...)
 (ConvertToFloat64Int32x8 ...) => (VCVTDQ2PD512 ...)
 (ConvertToFloat64Int64x2 ...) => (VCVTQQ2PD128 ...)
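These lowering rules are the whole story for the unmasked conversions: each generic ConvertTo* op is substituted one-for-one with its VCVT* machine op, which is exactly what the generated rewriteValueAMD64 cases near the end of this diff do. At the package level the feature should look roughly like the sketch below; the `simd` import path, the loader, and the ConvertToFloat32/ConvertToFloat64 method names are assumptions inferred from the generic op names (and the package requires a dev.simd toolchain with GOEXPERIMENT=simd), so verify against the real API:

```go
// Sketch only: API names inferred from ConvertToFloat32Float64x4 /
// ConvertToFloat64Float32x4 above; not confirmed exported signatures.
package main

import (
	"fmt"
	"simd" // experimental dev.simd package; import path assumed
)

func main() {
	src := [4]float64{1.5, 2.25, -3.0, 16777217}
	d := simd.LoadFloat64x4(&src) // loader name assumed
	f32 := d.ConvertToFloat32()   // Float64x4 -> Float32x4 (VCVTPD2PSY on AMD64)
	f64 := f32.ConvertToFloat64() // Float32x4 -> Float64x4 (VCVTPS2PD)
	fmt.Println(f32, f64)         // the 16777217 lane comes back as 16777216 (round to nearest)
}
```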
@@ -1478,6 +1483,9 @@
 (VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) => (VPALIGNRMasked256 [a] x y mask)
 (VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) => (VPALIGNRMasked512 [a] x y mask)
 (VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) => (VPALIGNRMasked128 [a] x y mask)
+(VMOVDQU64Masked128 (VCVTPD2PSX128 x) mask) => (VCVTPD2PSXMasked128 x mask)
+(VMOVDQU64Masked128 (VCVTPD2PSY128 x) mask) => (VCVTPD2PSYMasked128 x mask)
+(VMOVDQU64Masked256 (VCVTPD2PS256 x) mask) => (VCVTPD2PSMasked256 x mask)
 (VMOVDQU32Masked128 (VCVTDQ2PS128 x) mask) => (VCVTDQ2PSMasked128 x mask)
 (VMOVDQU32Masked256 (VCVTDQ2PS256 x) mask) => (VCVTDQ2PSMasked256 x mask)
 (VMOVDQU32Masked512 (VCVTDQ2PS512 x) mask) => (VCVTDQ2PSMasked512 x mask)
@@ -1490,6 +1498,8 @@
 (VMOVDQU64Masked128 (VCVTUQQ2PSX128 x) mask) => (VCVTUQQ2PSXMasked128 x mask)
 (VMOVDQU64Masked128 (VCVTUQQ2PSY128 x) mask) => (VCVTUQQ2PSYMasked128 x mask)
 (VMOVDQU64Masked256 (VCVTUQQ2PS256 x) mask) => (VCVTUQQ2PSMasked256 x mask)
+(VMOVDQU32Masked256 (VCVTPS2PD256 x) mask) => (VCVTPS2PDMasked256 x mask)
+(VMOVDQU32Masked512 (VCVTPS2PD512 x) mask) => (VCVTPS2PDMasked512 x mask)
 (VMOVDQU32Masked256 (VCVTDQ2PD256 x) mask) => (VCVTDQ2PDMasked256 x mask)
 (VMOVDQU32Masked512 (VCVTDQ2PD512 x) mask) => (VCVTDQ2PDMasked512 x mask)
 (VMOVDQU64Masked128 (VCVTQQ2PD128 x) mask) => (VCVTQQ2PDMasked128 x mask)
@@ -2031,6 +2041,7 @@
 (VPBLENDMDMasked512 dst (VSQRTPS512 x) mask) => (VSQRTPSMasked512Merging dst x mask)
 (VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) => (VSUBPSMasked512Merging dst x y mask)
 (VPBLENDMQMasked512 dst (VADDPD512 x y) mask) => (VADDPDMasked512Merging dst x y mask)
+(VPBLENDMQMasked512 dst (VCVTPD2PS256 x) mask) => (VCVTPD2PSMasked256Merging dst x mask)
 (VPBLENDMQMasked512 dst (VCVTQQ2PD512 x) mask) => (VCVTQQ2PDMasked512Merging dst x mask)
 (VPBLENDMQMasked512 dst (VCVTQQ2PS256 x) mask) => (VCVTQQ2PSMasked256Merging dst x mask)
 (VPBLENDMQMasked512 dst (VCVTTPD2DQ256 x) mask) => (VCVTTPD2DQMasked256Merging dst x mask)
@@ -2121,6 +2132,8 @@
 (VPBLENDVB128 dst (VBROADCASTSS512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM <types.TypeMask> mask))
 (VPBLENDVB128 dst (VCVTDQ2PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTDQ2PDMasked256Merging dst x (VPMOVVec32x4ToM <types.TypeMask> mask))
 (VPBLENDVB128 dst (VCVTDQ2PS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTDQ2PSMasked128Merging dst x (VPMOVVec32x4ToM <types.TypeMask> mask))
+(VPBLENDVB128 dst (VCVTPD2PSX128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPD2PSXMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))
+(VPBLENDVB128 dst (VCVTPS2PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2PDMasked256Merging dst x (VPMOVVec32x4ToM <types.TypeMask> mask))
 (VPBLENDVB128 dst (VCVTQQ2PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTQQ2PDMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))
 (VPBLENDVB128 dst (VCVTQQ2PSX128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTQQ2PSXMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))
 (VPBLENDVB128 dst (VCVTTPD2DQX128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPD2DQXMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))
@@ -2304,6 +2317,8 @@
 (VPBLENDVB256 dst (VADDPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM <types.TypeMask> mask))
 (VPBLENDVB256 dst (VCVTDQ2PD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTDQ2PDMasked512Merging dst x (VPMOVVec32x8ToM <types.TypeMask> mask))
 (VPBLENDVB256 dst (VCVTDQ2PS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTDQ2PSMasked256Merging dst x (VPMOVVec32x8ToM <types.TypeMask> mask))
+(VPBLENDVB256 dst (VCVTPD2PSY128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPD2PSYMasked128Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))
+(VPBLENDVB256 dst (VCVTPS2PD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2PDMasked512Merging dst x (VPMOVVec32x8ToM <types.TypeMask> mask))
 (VPBLENDVB256 dst (VCVTQQ2PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTQQ2PDMasked256Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))
 (VPBLENDVB256 dst (VCVTQQ2PSY128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTQQ2PSYMasked128Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))
 (VPBLENDVB256 dst (VCVTTPD2DQY128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPD2DQYMasked128Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))
@@ -2541,6 +2556,7 @@
 (VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem)
 (VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem)
 (VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem)
+(VCVTPD2PS256 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPD2PS256load {sym} [off] ptr mem)
 (VCVTDQ2PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PS512load {sym} [off] ptr mem)
 (VCVTQQ2PSX128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTQQ2PSX128load {sym} [off] ptr mem)
 (VCVTQQ2PSY128 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTQQ2PSY128load {sym} [off] ptr mem)
@@ -2551,6 +2567,9 @@
 (VCVTUQQ2PSX128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PSX128load {sym} [off] ptr mem)
 (VCVTUQQ2PSY128 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PSY128load {sym} [off] ptr mem)
 (VCVTUQQ2PS256 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PS256load {sym} [off] ptr mem)
+(VCVTPD2PSXMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPD2PSXMasked128load {sym} [off] ptr mask mem)
+(VCVTPD2PSYMasked128 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPD2PSYMasked128load {sym} [off] ptr mask mem)
+(VCVTPD2PSMasked256 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPD2PSMasked256load {sym} [off] ptr mask mem)
 (VCVTDQ2PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PSMasked128load {sym} [off] ptr mask mem)
 (VCVTDQ2PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PSMasked256load {sym} [off] ptr mask mem)
 (VCVTDQ2PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PSMasked512load {sym} [off] ptr mask mem)
@@ -2563,6 +2582,7 @@
 (VCVTUQQ2PSXMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PSXMasked128load {sym} [off] ptr mask mem)
 (VCVTUQQ2PSYMasked128 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PSYMasked128load {sym} [off] ptr mask mem)
 (VCVTUQQ2PSMasked256 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PSMasked256load {sym} [off] ptr mask mem)
+(VCVTPS2PD512 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2PD512load {sym} [off] ptr mem)
 (VCVTDQ2PD512 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PD512load {sym} [off] ptr mem)
 (VCVTQQ2PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTQQ2PD128load {sym} [off] ptr mem)
 (VCVTQQ2PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTQQ2PD256load {sym} [off] ptr mem)
@@ -2572,6 +2592,8 @@
 (VCVTUQQ2PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PD128load {sym} [off] ptr mem)
 (VCVTUQQ2PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PD256load {sym} [off] ptr mem)
 (VCVTUQQ2PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTUQQ2PD512load {sym} [off] ptr mem)
+(VCVTPS2PDMasked256 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2PDMasked256load {sym} [off] ptr mask mem)
+(VCVTPS2PDMasked512 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2PDMasked512load {sym} [off] ptr mask mem)
 (VCVTDQ2PDMasked256 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PDMasked256load {sym} [off] ptr mask mem)
 (VCVTDQ2PDMasked512 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTDQ2PDMasked512load {sym} [off] ptr mask mem)
 (VCVTQQ2PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTQQ2PDMasked128load {sym} [off] ptr mask mem)
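The canMergeLoad rules above let a conversion consume its operand directly from memory when the feeding vector load has no other use, for example a single VCVTPD2PS with a memory source instead of a VMOVDQU64 followed by a register-to-register VCVTPD2PS. A hedged sketch of Go code shaped to allow that folding (loader and method names assumed, as in the earlier sketch):

```go
package conv

import "simd" // experimental dev.simd package; import path assumed

// convertFromMemory is written so the loaded value has a single use, which is
// the precondition for folding the load into the conversion's memory operand.
// Float64x8 (8 x float64) narrows to Float32x8 via VCVTPD2PS on AMD64.
func convertFromMemory(p *[8]float64) simd.Float32x8 {
	return simd.LoadFloat64x8(p).ConvertToFloat32()
}
```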
@@ -66,6 +66,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 	{name: "VCVTDQ2PSMasked128", argLength: 2, reg: wkw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VCVTDQ2PSMasked256", argLength: 2, reg: wkw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VCVTDQ2PSMasked512", argLength: 2, reg: wkw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VCVTPD2PS256", argLength: 1, reg: w11, asm: "VCVTPD2PS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VCVTPD2PSMasked256", argLength: 2, reg: wkw, asm: "VCVTPD2PS", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VCVTPD2PSX128", argLength: 1, reg: v11, asm: "VCVTPD2PSX", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VCVTPD2PSXMasked128", argLength: 2, reg: wkw, asm: "VCVTPD2PSX", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VCVTPD2PSY128", argLength: 1, reg: v11, asm: "VCVTPD2PSY", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VCVTPD2PSYMasked128", argLength: 2, reg: wkw, asm: "VCVTPD2PSY", commutative: false, typ: "Vec128", resultInArg0: false},
+	{name: "VCVTPS2PD256", argLength: 1, reg: v11, asm: "VCVTPS2PD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VCVTPS2PD512", argLength: 1, reg: w11, asm: "VCVTPS2PD", commutative: false, typ: "Vec512", resultInArg0: false},
+	{name: "VCVTPS2PDMasked256", argLength: 2, reg: wkw, asm: "VCVTPS2PD", commutative: false, typ: "Vec256", resultInArg0: false},
+	{name: "VCVTPS2PDMasked512", argLength: 2, reg: wkw, asm: "VCVTPS2PD", commutative: false, typ: "Vec512", resultInArg0: false},
 	{name: "VCVTQQ2PD128", argLength: 1, reg: w11, asm: "VCVTQQ2PD", commutative: false, typ: "Vec128", resultInArg0: false},
 	{name: "VCVTQQ2PD256", argLength: 1, reg: w11, asm: "VCVTQQ2PD", commutative: false, typ: "Vec256", resultInArg0: false},
 	{name: "VCVTQQ2PD512", argLength: 1, reg: w11, asm: "VCVTQQ2PD", commutative: false, typ: "Vec512", resultInArg0: false},
@@ -1488,6 +1498,13 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 	{name: "VCVTDQ2PSMasked128load", argLength: 3, reg: wkwload, asm: "VCVTDQ2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false},
 	{name: "VCVTDQ2PSMasked256load", argLength: 3, reg: wkwload, asm: "VCVTDQ2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false},
 	{name: "VCVTDQ2PSMasked512load", argLength: 3, reg: wkwload, asm: "VCVTDQ2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPD2PS256load", argLength: 2, reg: w11load, asm: "VCVTPD2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPD2PSMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPD2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPD2PSXMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPD2PSX", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPD2PSYMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPD2PSY", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPS2PD512load", argLength: 2, reg: w11load, asm: "VCVTPS2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPS2PDMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPS2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false},
+	{name: "VCVTPS2PDMasked512load", argLength: 3, reg: wkwload, asm: "VCVTPS2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false},
 	{name: "VCVTQQ2PD128load", argLength: 2, reg: w11load, asm: "VCVTQQ2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false},
 	{name: "VCVTQQ2PD256load", argLength: 2, reg: w11load, asm: "VCVTQQ2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false},
 	{name: "VCVTQQ2PD512load", argLength: 2, reg: w11load, asm: "VCVTQQ2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false},
@@ -2114,6 +2131,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf
 	{name: "VCVTDQ2PSMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec128", resultInArg0: true},
 	{name: "VCVTDQ2PSMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec256", resultInArg0: true},
 	{name: "VCVTDQ2PSMasked512Merging", argLength: 3, reg: w2kw, asm: "VCVTDQ2PS", commutative: false, typ: "Vec512", resultInArg0: true},
+	{name: "VCVTPD2PSMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTPD2PS", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VCVTPD2PSXMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTPD2PSX", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VCVTPD2PSYMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTPD2PSY", commutative: false, typ: "Vec128", resultInArg0: true},
+	{name: "VCVTPS2PDMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTPS2PD", commutative: false, typ: "Vec256", resultInArg0: true},
+	{name: "VCVTPS2PDMasked512Merging", argLength: 3, reg: w2kw, asm: "VCVTPS2PD", commutative: false, typ: "Vec512", resultInArg0: true},
 	{name: "VCVTQQ2PDMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTQQ2PD", commutative: false, typ: "Vec128", resultInArg0: true},
 	{name: "VCVTQQ2PDMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTQQ2PD", commutative: false, typ: "Vec256", resultInArg0: true},
 	{name: "VCVTQQ2PDMasked512Merging", argLength: 3, reg: w2kw, asm: "VCVTQQ2PD", commutative: false, typ: "Vec512", resultInArg0: true},
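The Merging variants defined above take the old destination as an extra operand (resultInArg0: true) and only overwrite lanes whose mask bit is set; that is AVX-512 merge-masking. A scalar model of what, for example, VCVTPD2PSMasked256Merging computes per lane (illustrative only, not compiler code):

```go
package conv

// cvtPD2PSMergeMasked is a scalar model of a merge-masked float64 -> float32
// conversion over 8 lanes (the 512-bit source of VCVTPD2PS, 256-bit result).
func cvtPD2PSMergeMasked(dst [8]float32, src [8]float64, mask uint8) [8]float32 {
	for i := 0; i < 8; i++ {
		if mask&(1<<i) != 0 {
			dst[i] = float32(src[i]) // converted lane, rounded to nearest
		}
		// lanes with a zero mask bit keep their previous dst value
	}
	return dst
}
```

By contrast, the plain Masked (non-Merging) ops correspond to zero-masking, where unselected lanes are set to zero rather than preserved.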
@@ -237,6 +237,9 @@ func simdGenericOps() []opData {
 	{name: "ConcatPermuteUint64x2", argLength: 3, commutative: false},
 	{name: "ConcatPermuteUint64x4", argLength: 3, commutative: false},
 	{name: "ConcatPermuteUint64x8", argLength: 3, commutative: false},
+	{name: "ConvertToFloat32Float64x2", argLength: 1, commutative: false},
+	{name: "ConvertToFloat32Float64x4", argLength: 1, commutative: false},
+	{name: "ConvertToFloat32Float64x8", argLength: 1, commutative: false},
 	{name: "ConvertToFloat32Int32x4", argLength: 1, commutative: false},
 	{name: "ConvertToFloat32Int32x8", argLength: 1, commutative: false},
 	{name: "ConvertToFloat32Int32x16", argLength: 1, commutative: false},
@@ -249,6 +252,8 @@ func simdGenericOps() []opData {
 	{name: "ConvertToFloat32Uint64x2", argLength: 1, commutative: false},
 	{name: "ConvertToFloat32Uint64x4", argLength: 1, commutative: false},
 	{name: "ConvertToFloat32Uint64x8", argLength: 1, commutative: false},
+	{name: "ConvertToFloat64Float32x4", argLength: 1, commutative: false},
+	{name: "ConvertToFloat64Float32x8", argLength: 1, commutative: false},
 	{name: "ConvertToFloat64Int32x4", argLength: 1, commutative: false},
 	{name: "ConvertToFloat64Int32x8", argLength: 1, commutative: false},
 	{name: "ConvertToFloat64Int64x2", argLength: 1, commutative: false},
@@ -1307,6 +1307,16 @@ const (
 	OpAMD64VCVTDQ2PSMasked128
 	OpAMD64VCVTDQ2PSMasked256
 	OpAMD64VCVTDQ2PSMasked512
+	OpAMD64VCVTPD2PS256
+	OpAMD64VCVTPD2PSMasked256
+	OpAMD64VCVTPD2PSX128
+	OpAMD64VCVTPD2PSXMasked128
+	OpAMD64VCVTPD2PSY128
+	OpAMD64VCVTPD2PSYMasked128
+	OpAMD64VCVTPS2PD256
+	OpAMD64VCVTPS2PD512
+	OpAMD64VCVTPS2PDMasked256
+	OpAMD64VCVTPS2PDMasked512
 	OpAMD64VCVTQQ2PD128
 	OpAMD64VCVTQQ2PD256
 	OpAMD64VCVTQQ2PD512
@@ -2729,6 +2739,13 @@ const (
 	OpAMD64VCVTDQ2PSMasked128load
 	OpAMD64VCVTDQ2PSMasked256load
 	OpAMD64VCVTDQ2PSMasked512load
+	OpAMD64VCVTPD2PS256load
+	OpAMD64VCVTPD2PSMasked256load
+	OpAMD64VCVTPD2PSXMasked128load
+	OpAMD64VCVTPD2PSYMasked128load
+	OpAMD64VCVTPS2PD512load
+	OpAMD64VCVTPS2PDMasked256load
+	OpAMD64VCVTPS2PDMasked512load
 	OpAMD64VCVTQQ2PD128load
 	OpAMD64VCVTQQ2PD256load
 	OpAMD64VCVTQQ2PD512load
@@ -3355,6 +3372,11 @@ const (
 	OpAMD64VCVTDQ2PSMasked128Merging
 	OpAMD64VCVTDQ2PSMasked256Merging
 	OpAMD64VCVTDQ2PSMasked512Merging
+	OpAMD64VCVTPD2PSMasked256Merging
+	OpAMD64VCVTPD2PSXMasked128Merging
+	OpAMD64VCVTPD2PSYMasked128Merging
+	OpAMD64VCVTPS2PDMasked256Merging
+	OpAMD64VCVTPS2PDMasked512Merging
 	OpAMD64VCVTQQ2PDMasked128Merging
 	OpAMD64VCVTQQ2PDMasked256Merging
 	OpAMD64VCVTQQ2PDMasked512Merging
@@ -6360,6 +6382,9 @@ const (
 	OpConcatPermuteUint64x2
 	OpConcatPermuteUint64x4
 	OpConcatPermuteUint64x8
+	OpConvertToFloat32Float64x2
+	OpConvertToFloat32Float64x4
+	OpConvertToFloat32Float64x8
 	OpConvertToFloat32Int32x4
 	OpConvertToFloat32Int32x8
 	OpConvertToFloat32Int32x16
@@ -6372,6 +6397,8 @@ const (
 	OpConvertToFloat32Uint64x2
 	OpConvertToFloat32Uint64x4
 	OpConvertToFloat32Uint64x8
+	OpConvertToFloat64Float32x4
+	OpConvertToFloat64Float32x8
 	OpConvertToFloat64Int32x4
 	OpConvertToFloat64Int32x8
 	OpConvertToFloat64Int64x2
@@ -21611,6 +21638,141 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:   "VCVTPD2PS256",
+		argLen: 1,
+		asm:    x86.AVCVTPD2PS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPD2PSMasked256",
+		argLen: 2,
+		asm:    x86.AVCVTPD2PS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPD2PSX128",
+		argLen: 1,
+		asm:    x86.AVCVTPD2PSX,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+		},
+	},
+	{
+		name:   "VCVTPD2PSXMasked128",
+		argLen: 2,
+		asm:    x86.AVCVTPD2PSX,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPD2PSY128",
+		argLen: 1,
+		asm:    x86.AVCVTPD2PSY,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+		},
+	},
+	{
+		name:   "VCVTPD2PSYMasked128",
+		argLen: 2,
+		asm:    x86.AVCVTPD2PSY,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPS2PD256",
+		argLen: 1,
+		asm:    x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+			outputs: []outputInfo{
+				{0, 2147418112}, // X0-X14
+			},
+		},
+	},
+	{
+		name:   "VCVTPS2PD512",
+		argLen: 1,
+		asm:    x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPS2PDMasked256",
+		argLen: 2,
+		asm:    x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:   "VCVTPS2PDMasked512",
+		argLen: 2,
+		asm:    x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
 	{
 		name:   "VCVTQQ2PD128",
 		argLen: 1,
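In these generated tables, the second field of each inputInfo/outputInfo entry is a bitmask over the register numbering, and the trailing comment names the registers it selects. The constants are easy to check; the bit positions used below (X registers from bit 16, K1-K7 from bit 49) are inferred from those comments rather than quoted from the compiler, so treat this as a consistency check, not a definition:

```go
package main

import "fmt"

func main() {
	x0toX14 := uint64(0x7FFF) << 16            // X0-X14
	xAllButX15 := x0toX14 | uint64(0xFFFF)<<32 // X0-X14 X16-X31
	k1toK7 := uint64(0x7F) << 49               // K1-K7
	fmt.Println(x0toX14 == 2147418112)         // true
	fmt.Println(xAllButX15 == 281472829161472) // true
	fmt.Println(k1toK7 == 71494644084506624)   // true
}
```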
@@ -42523,6 +42685,116 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:      "VCVTPD2PS256load",
+		auxType:   auxSymOff,
+		argLen:    2,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPD2PS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPD2PSMasked256load",
+		auxType:   auxSymOff,
+		argLen:    3,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPD2PS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPD2PSXMasked128load",
+		auxType:   auxSymOff,
+		argLen:    3,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPD2PSX,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPD2PSYMasked128load",
+		auxType:   auxSymOff,
+		argLen:    3,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPD2PSY,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPS2PD512load",
+		auxType:   auxSymOff,
+		argLen:    2,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPS2PDMasked256load",
+		auxType:   auxSymOff,
+		argLen:    3,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:      "VCVTPS2PDMasked512load",
+		auxType:   auxSymOff,
+		argLen:    3,
+		symEffect: SymRead,
+		asm:       x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
 	{
 		name:    "VCVTQQ2PD128load",
 		auxType: auxSymOff,
@@ -52890,6 +53162,86 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:         "VCVTPD2PSMasked256Merging",
+		argLen:       3,
+		resultInArg0: true,
+		asm:          x86.AVCVTPD2PS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281472829161472},   // X0-X14 X16-X31
+				{1, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:         "VCVTPD2PSXMasked128Merging",
+		argLen:       3,
+		resultInArg0: true,
+		asm:          x86.AVCVTPD2PSX,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281472829161472},   // X0-X14 X16-X31
+				{1, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:         "VCVTPD2PSYMasked128Merging",
+		argLen:       3,
+		resultInArg0: true,
+		asm:          x86.AVCVTPD2PSY,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281472829161472},   // X0-X14 X16-X31
+				{1, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:         "VCVTPS2PDMasked256Merging",
+		argLen:       3,
+		resultInArg0: true,
+		asm:          x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281472829161472},   // X0-X14 X16-X31
+				{1, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
+	{
+		name:         "VCVTPS2PDMasked512Merging",
+		argLen:       3,
+		resultInArg0: true,
+		asm:          x86.AVCVTPS2PD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7
+				{0, 281472829161472},   // X0-X14 X16-X31
+				{1, 281474976645120},   // X0-X31
+			},
+			outputs: []outputInfo{
+				{0, 281472829161472}, // X0-X14 X16-X31
+			},
+		},
+	},
 	{
 		name:         "VCVTQQ2PDMasked128Merging",
 		argLen:       3,
@@ -89844,6 +90196,21 @@ var opcodeTable = [...]opInfo{
 		argLen:  3,
 		generic: true,
 	},
+	{
+		name:    "ConvertToFloat32Float64x2",
+		argLen:  1,
+		generic: true,
+	},
+	{
+		name:    "ConvertToFloat32Float64x4",
+		argLen:  1,
+		generic: true,
+	},
+	{
+		name:    "ConvertToFloat32Float64x8",
+		argLen:  1,
+		generic: true,
+	},
 	{
 		name:    "ConvertToFloat32Int32x4",
 		argLen:  1,
@@ -89904,6 +90271,16 @@ var opcodeTable = [...]opInfo{
 		argLen:  1,
 		generic: true,
 	},
+	{
+		name:    "ConvertToFloat64Float32x4",
+		argLen:  1,
+		generic: true,
+	},
+	{
+		name:    "ConvertToFloat64Float32x8",
+		argLen:  1,
+		generic: true,
+	},
 	{
 		name:    "ConvertToFloat64Int32x4",
 		argLen:  1,
@ -604,6 +604,20 @@ func rewriteValueAMD64(v *Value) bool {
|
||||||
return rewriteValueAMD64_OpAMD64VCVTDQ2PSMasked256(v)
|
return rewriteValueAMD64_OpAMD64VCVTDQ2PSMasked256(v)
|
||||||
case OpAMD64VCVTDQ2PSMasked512:
|
case OpAMD64VCVTDQ2PSMasked512:
|
||||||
return rewriteValueAMD64_OpAMD64VCVTDQ2PSMasked512(v)
|
return rewriteValueAMD64_OpAMD64VCVTDQ2PSMasked512(v)
|
||||||
|
case OpAMD64VCVTPD2PS256:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPD2PS256(v)
|
||||||
|
case OpAMD64VCVTPD2PSMasked256:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPD2PSMasked256(v)
|
||||||
|
case OpAMD64VCVTPD2PSXMasked128:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPD2PSXMasked128(v)
|
||||||
|
case OpAMD64VCVTPD2PSYMasked128:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPD2PSYMasked128(v)
|
||||||
|
case OpAMD64VCVTPS2PD512:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPS2PD512(v)
|
||||||
|
case OpAMD64VCVTPS2PDMasked256:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPS2PDMasked256(v)
|
||||||
|
case OpAMD64VCVTPS2PDMasked512:
|
||||||
|
return rewriteValueAMD64_OpAMD64VCVTPS2PDMasked512(v)
|
||||||
case OpAMD64VCVTQQ2PD128:
|
case OpAMD64VCVTQQ2PD128:
|
||||||
return rewriteValueAMD64_OpAMD64VCVTQQ2PD128(v)
|
return rewriteValueAMD64_OpAMD64VCVTQQ2PD128(v)
|
||||||
case OpAMD64VCVTQQ2PD256:
|
case OpAMD64VCVTQQ2PD256:
|
||||||
|
|
@ -2809,6 +2823,15 @@ func rewriteValueAMD64(v *Value) bool {
|
||||||
return rewriteValueAMD64_OpConstBool(v)
|
return rewriteValueAMD64_OpConstBool(v)
|
||||||
case OpConstNil:
|
case OpConstNil:
|
||||||
return rewriteValueAMD64_OpConstNil(v)
|
return rewriteValueAMD64_OpConstNil(v)
|
||||||
|
case OpConvertToFloat32Float64x2:
|
||||||
|
v.Op = OpAMD64VCVTPD2PSX128
|
||||||
|
return true
|
||||||
|
case OpConvertToFloat32Float64x4:
|
||||||
|
v.Op = OpAMD64VCVTPD2PSY128
|
||||||
|
return true
|
||||||
|
case OpConvertToFloat32Float64x8:
|
||||||
|
v.Op = OpAMD64VCVTPD2PS256
|
||||||
|
return true
|
||||||
case OpConvertToFloat32Int32x16:
|
case OpConvertToFloat32Int32x16:
|
||||||
v.Op = OpAMD64VCVTDQ2PS512
|
v.Op = OpAMD64VCVTDQ2PS512
|
||||||
return true
|
return true
|
||||||
|
|
@ -2845,6 +2868,12 @@ func rewriteValueAMD64(v *Value) bool {
|
||||||
case OpConvertToFloat32Uint64x8:
|
case OpConvertToFloat32Uint64x8:
|
||||||
v.Op = OpAMD64VCVTUQQ2PS256
|
v.Op = OpAMD64VCVTUQQ2PS256
|
||||||
return true
|
return true
|
||||||
|
case OpConvertToFloat64Float32x4:
|
||||||
|
v.Op = OpAMD64VCVTPS2PD256
|
||||||
|
return true
|
||||||
|
case OpConvertToFloat64Float32x8:
|
||||||
|
v.Op = OpAMD64VCVTPS2PD512
|
||||||
|
return true
|
||||||
case OpConvertToFloat64Int32x4:
|
case OpConvertToFloat64Int32x4:
|
||||||
v.Op = OpAMD64VCVTDQ2PD256
|
v.Op = OpAMD64VCVTDQ2PD256
|
||||||
return true
|
return true
|
||||||
|
|
@ -29165,6 +29194,191 @@ func rewriteValueAMD64_OpAMD64VCVTDQ2PSMasked512(v *Value) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPD2PS256(v *Value) bool {
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPD2PS256 l:(VMOVDQUload512 {sym} [off] ptr mem))
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPD2PS256load {sym} [off] ptr mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload512 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPD2PS256load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg2(ptr, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPD2PSMasked256(v *Value) bool {
|
||||||
|
v_1 := v.Args[1]
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPD2PSMasked256 l:(VMOVDQUload512 {sym} [off] ptr mem) mask)
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPD2PSMasked256load {sym} [off] ptr mask mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload512 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
mask := v_1
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPD2PSMasked256load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg3(ptr, mask, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPD2PSXMasked128(v *Value) bool {
|
||||||
|
v_1 := v.Args[1]
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPD2PSXMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask)
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPD2PSXMasked128load {sym} [off] ptr mask mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload128 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
mask := v_1
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPD2PSXMasked128load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg3(ptr, mask, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPD2PSYMasked128(v *Value) bool {
|
||||||
|
v_1 := v.Args[1]
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPD2PSYMasked128 l:(VMOVDQUload256 {sym} [off] ptr mem) mask)
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPD2PSYMasked128load {sym} [off] ptr mask mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload256 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
mask := v_1
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPD2PSYMasked128load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg3(ptr, mask, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPS2PD512(v *Value) bool {
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPS2PD512 l:(VMOVDQUload256 {sym} [off] ptr mem))
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPS2PD512load {sym} [off] ptr mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload256 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPS2PD512load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg2(ptr, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPS2PDMasked256(v *Value) bool {
|
||||||
|
v_1 := v.Args[1]
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPS2PDMasked256 l:(VMOVDQUload128 {sym} [off] ptr mem) mask)
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPS2PDMasked256load {sym} [off] ptr mask mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload128 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
mask := v_1
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPS2PDMasked256load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg3(ptr, mask, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
func rewriteValueAMD64_OpAMD64VCVTPS2PDMasked512(v *Value) bool {
|
||||||
|
v_1 := v.Args[1]
|
||||||
|
v_0 := v.Args[0]
|
||||||
|
// match: (VCVTPS2PDMasked512 l:(VMOVDQUload256 {sym} [off] ptr mem) mask)
|
||||||
|
// cond: canMergeLoad(v, l) && clobber(l)
|
||||||
|
// result: (VCVTPS2PDMasked512load {sym} [off] ptr mask mem)
|
||||||
|
for {
|
||||||
|
l := v_0
|
||||||
|
if l.Op != OpAMD64VMOVDQUload256 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
off := auxIntToInt32(l.AuxInt)
|
||||||
|
sym := auxToSym(l.Aux)
|
||||||
|
mem := l.Args[1]
|
||||||
|
ptr := l.Args[0]
|
||||||
|
mask := v_1
|
||||||
|
if !(canMergeLoad(v, l) && clobber(l)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpAMD64VCVTPS2PDMasked512load)
|
||||||
|
v.AuxInt = int32ToAuxInt(off)
|
||||||
|
v.Aux = symToAux(sym)
|
||||||
|
v.AddArg3(ptr, mask, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
func rewriteValueAMD64_OpAMD64VCVTQQ2PD128(v *Value) bool {
|
func rewriteValueAMD64_OpAMD64VCVTQQ2PD128(v *Value) bool {
|
||||||
v_0 := v.Args[0]
|
v_0 := v.Args[0]
|
||||||
// match: (VCVTQQ2PD128 l:(VMOVDQUload128 {sym} [off] ptr mem))
|
// match: (VCVTQQ2PD128 l:(VMOVDQUload128 {sym} [off] ptr mem))
|
||||||
|
|
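Each of the load-folding helpers added above is machine-generated from a one-line SSA rewrite rule; the // match, // cond, and // result comments inside each function are that rule's text. Assembled from those comments (a sketch of the rule's shape, not a quotation from the rules file), the VCVTPS2PD512 case reads roughly:

(VCVTPS2PD512 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2PD512load {sym} [off] ptr mem)

When the condition holds, the 256-bit load feeding the widening conversion is folded into the instruction's memory operand and the standalone load is marked clobbered.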
@ -35833,6 +36047,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool {
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU32Masked256 (VCVTPS2PD256 x) mask)
// result: (VCVTPS2PDMasked256 x mask)
for {
if v_0.Op != OpAMD64VCVTPS2PD256 {
break
}
x := v_0.Args[0]
mask := v_1
v.reset(OpAMD64VCVTPS2PDMasked256)
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU32Masked256 (VCVTDQ2PD256 x) mask)
// result: (VCVTDQ2PDMasked256 x mask)
for {

@ -36744,6 +36970,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool {
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU32Masked512 (VCVTPS2PD512 x) mask)
// result: (VCVTPS2PDMasked512 x mask)
for {
if v_0.Op != OpAMD64VCVTPS2PD512 {
break
}
x := v_0.Args[0]
mask := v_1
v.reset(OpAMD64VCVTPS2PDMasked512)
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU32Masked512 (VCVTDQ2PD512 x) mask)
// result: (VCVTDQ2PDMasked512 x mask)
for {

@ -37571,6 +37809,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool {
v.AddArg4(x, y, z, mask)
return true
}
// match: (VMOVDQU64Masked128 (VCVTPD2PSX128 x) mask)
// result: (VCVTPD2PSXMasked128 x mask)
for {
if v_0.Op != OpAMD64VCVTPD2PSX128 {
break
}
x := v_0.Args[0]
mask := v_1
v.reset(OpAMD64VCVTPD2PSXMasked128)
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU64Masked128 (VCVTPD2PSY128 x) mask)
// result: (VCVTPD2PSYMasked128 x mask)
for {
if v_0.Op != OpAMD64VCVTPD2PSY128 {
break
}
x := v_0.Args[0]
mask := v_1
v.reset(OpAMD64VCVTPD2PSYMasked128)
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU64Masked128 (VCVTQQ2PSX128 x) mask)
// result: (VCVTQQ2PSXMasked128 x mask)
for {

@ -38410,6 +38672,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool {
v.AddArg4(x, y, z, mask)
return true
}
// match: (VMOVDQU64Masked256 (VCVTPD2PS256 x) mask)
// result: (VCVTPD2PSMasked256 x mask)
for {
if v_0.Op != OpAMD64VCVTPD2PS256 {
break
}
x := v_0.Args[0]
mask := v_1
v.reset(OpAMD64VCVTPD2PSMasked256)
v.AddArg2(x, mask)
return true
}
// match: (VMOVDQU64Masked256 (VCVTQQ2PS256 x) mask)
// result: (VCVTQQ2PSMasked256 x mask)
for {

@ -44249,6 +44523,19 @@ func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool {
v.AddArg4(dst, x, y, mask)
return true
}
// match: (VPBLENDMQMasked512 dst (VCVTPD2PS256 x) mask)
// result: (VCVTPD2PSMasked256Merging dst x mask)
for {
dst := v_0
if v_1.Op != OpAMD64VCVTPD2PS256 {
break
}
x := v_1.Args[0]
mask := v_2
v.reset(OpAMD64VCVTPD2PSMasked256Merging)
v.AddArg3(dst, x, mask)
return true
}
// match: (VPBLENDMQMasked512 dst (VCVTQQ2PD512 x) mask)
// result: (VCVTQQ2PDMasked512Merging dst x mask)
for {

@ -45583,6 +45870,44 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool {
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB128 dst (VCVTPD2PSX128 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTPD2PSXMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))
for {
dst := v_0
if v_1.Op != OpAMD64VCVTPD2PSX128 {
break
}
x := v_1.Args[0]
mask := v_2
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) {
break
}
v.reset(OpAMD64VCVTPD2PSXMasked128Merging)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB128 dst (VCVTPS2PD256 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTPS2PDMasked256Merging dst x (VPMOVVec32x4ToM <types.TypeMask> mask))
for {
dst := v_0
if v_1.Op != OpAMD64VCVTPS2PD256 {
break
}
x := v_1.Args[0]
mask := v_2
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) {
break
}
v.reset(OpAMD64VCVTPS2PDMasked256Merging)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB128 dst (VCVTQQ2PD128 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTQQ2PDMasked128Merging dst x (VPMOVVec64x2ToM <types.TypeMask> mask))

@ -49194,6 +49519,44 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool {
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB256 dst (VCVTPD2PSY128 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTPD2PSYMasked128Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))
for {
dst := v_0
if v_1.Op != OpAMD64VCVTPD2PSY128 {
break
}
x := v_1.Args[0]
mask := v_2
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) {
break
}
v.reset(OpAMD64VCVTPD2PSYMasked128Merging)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB256 dst (VCVTPS2PD512 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTPS2PDMasked512Merging dst x (VPMOVVec32x8ToM <types.TypeMask> mask))
for {
dst := v_0
if v_1.Op != OpAMD64VCVTPS2PD512 {
break
}
x := v_1.Args[0]
mask := v_2
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) {
break
}
v.reset(OpAMD64VCVTPS2PDMasked512Merging)
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
v0.AddArg(mask)
v.AddArg3(dst, x, v0)
return true
}
// match: (VPBLENDVB256 dst (VCVTQQ2PD256 x) mask)
// cond: v.Block.CPUfeatures.hasFeature(CPUavx512)
// result: (VCVTQQ2PDMasked256Merging dst x (VPMOVVec64x4ToM <types.TypeMask> mask))

@ -261,6 +261,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies .
addF(simdPackage, "Uint8x16.ConcatShiftBytesRight", opLen2Imm8(ssa.OpConcatShiftBytesRightUint8x16, types.TypeVec128, 0), sys.AMD64)
addF(simdPackage, "Uint8x32.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x32, types.TypeVec256, 0), sys.AMD64)
addF(simdPackage, "Uint8x64.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x64, types.TypeVec512, 0), sys.AMD64)
addF(simdPackage, "Float64x2.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Float64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x4.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Float64x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Float64x8.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Float64x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x4.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Int32x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Int32x8.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Int32x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x16.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Int32x16, types.TypeVec512), sys.AMD64)

@ -273,6 +276,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies .
addF(simdPackage, "Uint64x2.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Uint64x2, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x4.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Uint64x4, types.TypeVec128), sys.AMD64)
addF(simdPackage, "Uint64x8.ConvertToFloat32", opLen1(ssa.OpConvertToFloat32Uint64x8, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x4.ConvertToFloat64", opLen1(ssa.OpConvertToFloat64Float32x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Float32x8.ConvertToFloat64", opLen1(ssa.OpConvertToFloat64Float32x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int32x4.ConvertToFloat64", opLen1(ssa.OpConvertToFloat64Int32x4, types.TypeVec256), sys.AMD64)
addF(simdPackage, "Int32x8.ConvertToFloat64", opLen1(ssa.OpConvertToFloat64Int32x8, types.TypeVec512), sys.AMD64)
addF(simdPackage, "Int64x2.ConvertToFloat64", opLen1(ssa.OpConvertToFloat64Int64x2, types.TypeVec128), sys.AMD64)

@ -337,7 +337,9 @@ func (op *Operation) sortOperand() {

// adjustAsm adjusts the asm to make it align with Go's assembler.
func (op *Operation) adjustAsm() {
if op.Asm == "VCVTTPD2DQ" || op.Asm == "VCVTTPD2UDQ" || op.Asm == "VCVTQQ2PS" || op.Asm == "VCVTUQQ2PS" {
if op.Asm == "VCVTTPD2DQ" || op.Asm == "VCVTTPD2UDQ" ||
op.Asm == "VCVTQQ2PS" || op.Asm == "VCVTUQQ2PS" ||
op.Asm == "VCVTPD2PS" {
switch *op.In[0].Bits {
case 128:
op.Asm += "X"
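The adjustAsm change above extends an existing special case to VCVTPD2PS: for these narrowing conversions the destination is an XMM register whether the source is 128 or 256 bits wide, so Go's assembler distinguishes the forms with an X or Y suffix (VCVTPD2PSX, VCVTPD2PSY), while the 512-bit source keeps the plain mnemonic; the opcode table above and the doc comments below use exactly those names. A minimal standalone sketch of the suffix rule, with a hypothetical helper name rather than the generator's own code:

func narrowingCvtAsm(asm string, srcBits int) string {
    // 128- and 256-bit sources share an XMM destination, so the bare
    // mnemonic is ambiguous to Go's assembler and needs a size suffix.
    switch srcBits {
    case 128:
        return asm + "X" // VCVTPD2PS -> VCVTPD2PSX
    case 256:
        return asm + "Y" // VCVTPD2PS -> VCVTPD2PSY
    }
    return asm // 512-bit sources keep the plain mnemonic
}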
@ -32,12 +32,12 @@
// When a conversion is inexact, a truncated (round toward zero) value is returned.
// If a converted result cannot be represented in uint64, an implementation-defined
// architecture-specific value is returned.
- go: "ConvertToFloat32"
- go: "ConvertToFloat32" # Also float64 -> float32
commutative: false
regexpTag: "convert"
documentation: !string |-
// NAME converts element values to float32.
- go: "ConvertToFloat64"
- go: "ConvertToFloat64" # Also float32 -> float64
commutative: false
regexpTag: "convert"
documentation: !string |-

@ -112,6 +112,28 @@
out:
- base: float
bits: 256|512
# float64 -> float32
- go: ConvertToFloat32
regexpTag: "convert"
asm: "VCVTPD2PS"
addDoc:
!string |-
// The result vector's elements are rounded to the nearest value.
in: &fp64
- base: float
elemBits: 64
out: &fp32
- base: float
elemBits: 32
# float32 -> float64
- go: ConvertToFloat64
regexpTag: "convert"
asm: "VCVTPS2PD"
in: *fp32
out:
- base: float
elemBits: 64
bits: 256|512

# Widening integer conversions.
# uint8 -> uint16

@ -1546,6 +1546,24 @@ func (x Uint8x64) ConcatShiftBytesRightGrouped(constant uint8, y Uint8x64) Uint8

/* ConvertToFloat32 */

// ConvertToFloat32 converts element values to float32.
// The result vector's elements are rounded to the nearest value.
//
// Asm: VCVTPD2PSX, CPU Feature: AVX
func (x Float64x2) ConvertToFloat32() Float32x4

// ConvertToFloat32 converts element values to float32.
// The result vector's elements are rounded to the nearest value.
//
// Asm: VCVTPD2PSY, CPU Feature: AVX
func (x Float64x4) ConvertToFloat32() Float32x4

// ConvertToFloat32 converts element values to float32.
// The result vector's elements are rounded to the nearest value.
//
// Asm: VCVTPD2PS, CPU Feature: AVX512
func (x Float64x8) ConvertToFloat32() Float32x8

// ConvertToFloat32 converts element values to float32.
//
// Asm: VCVTDQ2PS, CPU Feature: AVX

@ -1608,6 +1626,16 @@ func (x Uint64x8) ConvertToFloat32() Float32x8

/* ConvertToFloat64 */

// ConvertToFloat64 converts element values to float64.
//
// Asm: VCVTPS2PD, CPU Feature: AVX
func (x Float32x4) ConvertToFloat64() Float64x4

// ConvertToFloat64 converts element values to float64.
//
// Asm: VCVTPS2PD, CPU Feature: AVX512
func (x Float32x8) ConvertToFloat64() Float64x8

// ConvertToFloat64 converts element values to float64.
//
// Asm: VCVTDQ2PD, CPU Feature: AVX
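Taken together, the new public surface is small: the Float64xN types gain ConvertToFloat32 (rounding to nearest) and Float32x4/Float32x8 gain ConvertToFloat64 (exact widening). A short usage sketch; the conversion methods are the ones declared above, while the LoadFloat64x4Slice/StoreSlice helpers and the plain "simd" import path are assumptions about the dev.simd package and may differ:

package conv

import "simd"

// NarrowAndWiden round-trips four float64 lanes through float32 using the
// conversions added in this change: the narrowing step rounds to nearest,
// the widening step is exact.
func NarrowAndWiden(src, dst []float64) {
    v := simd.LoadFloat64x4Slice(src) // assumed loader name
    f32 := v.ConvertToFloat32()       // Float64x4 -> Float32x4 (VCVTPD2PSY)
    f64 := f32.ConvertToFloat64()     // Float32x4 -> Float64x4 (VCVTPS2PD)
    f64.StoreSlice(dst)               // assumed store helper name
}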