test/codegen: simplify asmcheck pattern matching

Separate patterns in asmcheck by spaces instead of commas.
Many patterns end in a comma (like "MOV [$]123,"), so separating
patterns with commas is not great; they're already quoted, so spaces are fine.

Also replace all tabs in the assembly lines with spaces before matching.
Finally, adopt [$] instead of \$ or \\$ as the idiom for matching a literal $.
The combined effect of these changes is to make the patterns look like:

  	   // amd64:"BSFQ" "ORQ [$]256"

instead of the old:

  	   // amd64:"BSFQ","ORQ\t\\$256"

Update all tests as well.
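
The new splitting and matching is roughly equivalent to the sketch
below (illustration only, with hypothetical helper names; it is not
the actual asmcheck code and ignores negative -"..." checks,
backquoted patterns, and per-arch selection):

	// Sketch only: not the real asmcheck implementation.
	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	// quoted pulls the "..." patterns out of a check string such as
	// `"BSFQ" "ORQ [$]256"`.
	var quoted = regexp.MustCompile(`"([^"]*)"`)

	// matchesAll reports whether every quoted pattern matches the given
	// assembly line, after tabs in the line are replaced with spaces.
	func matchesAll(checks, asmLine string) bool {
		line := strings.ReplaceAll(asmLine, "\t", " ")
		for _, m := range quoted.FindAllStringSubmatch(checks, -1) {
			if !regexp.MustCompile(m[1]).MatchString(line) {
				return false
			}
		}
		return true
	}

	func main() {
		fmt.Println(matchesAll(`"ORQ [$]256"`, "ORQ\t$256, AX"))        // true
		fmt.Println(matchesAll(`"BSFQ" "ORQ [$]256"`, "ORQ\t$256, AX")) // false: no BSFQ
	}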

Change-Id: Ia39febe5d7f67ba115846422789e11b185d5c807
Reviewed-on: https://go-review.googlesource.com/c/go/+/716060
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Alan Donovan <adonovan@google.com>
Reviewed-by: Jorropo <jorropo.pgm@gmail.com>
Russ Cox 2025-10-26 22:51:14 -04:00
parent 32ee3f3f73
commit 915c1839fe
48 changed files with 2035 additions and 2032 deletions


@@ -12,37 +12,37 @@ var sink64 [8]float64
func approx(x float64) {
// amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41"
// amd64:"ROUNDSD\t[$]2"
// s390x:"FIDBR\t[$]6"
// amd64:"ROUNDSD [$]2"
// s390x:"FIDBR [$]6"
// arm64:"FRINTPD"
// ppc64x:"FRIP"
// wasm:"F64Ceil"
sink64[0] = math.Ceil(x)
// amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41"
// amd64:"ROUNDSD\t[$]1"
// s390x:"FIDBR\t[$]7"
// amd64:"ROUNDSD [$]1"
// s390x:"FIDBR [$]7"
// arm64:"FRINTMD"
// ppc64x:"FRIM"
// wasm:"F64Floor"
sink64[1] = math.Floor(x)
// s390x:"FIDBR\t[$]1"
// s390x:"FIDBR [$]1"
// arm64:"FRINTAD"
// ppc64x:"FRIN"
sink64[2] = math.Round(x)
// amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41"
// amd64:"ROUNDSD\t[$]3"
// s390x:"FIDBR\t[$]5"
// amd64:"ROUNDSD [$]3"
// s390x:"FIDBR [$]5"
// arm64:"FRINTZD"
// ppc64x:"FRIZ"
// wasm:"F64Trunc"
sink64[3] = math.Trunc(x)
// amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41"
// amd64:"ROUNDSD\t[$]0"
// s390x:"FIDBR\t[$]4"
// amd64:"ROUNDSD [$]0"
// s390x:"FIDBR [$]4"
// arm64:"FRINTND"
// wasm:"F64Nearest"
sink64[4] = math.RoundToEven(x)
@@ -78,43 +78,43 @@ func sqrt32(x float32) float32 {
// Check that it's using integer registers
func abs(x, y float64) {
// amd64:"BTRQ\t[$]63"
// arm64:"FABSD\t"
// loong64:"ABSD\t"
// s390x:"LPDFR\t",-"MOVD\t" (no integer load/store)
// ppc64x:"FABS\t"
// riscv64:"FABSD\t"
// amd64:"BTRQ [$]63"
// arm64:"FABSD "
// loong64:"ABSD "
// s390x:"LPDFR " -"MOVD " (no integer load/store)
// ppc64x:"FABS "
// riscv64:"FABSD "
// wasm:"F64Abs"
// arm/6:"ABSD\t"
// mips64/hardfloat:"ABSD\t"
// mips/hardfloat:"ABSD\t"
// arm/6:"ABSD "
// mips64/hardfloat:"ABSD "
// mips/hardfloat:"ABSD "
sink64[0] = math.Abs(x)
// amd64:"BTRQ\t[$]63","PXOR" (TODO: this should be BTSQ)
// s390x:"LNDFR\t",-"MOVD\t" (no integer load/store)
// ppc64x:"FNABS\t"
// amd64:"BTRQ [$]63" "PXOR" (TODO: this should be BTSQ)
// s390x:"LNDFR " -"MOVD " (no integer load/store)
// ppc64x:"FNABS "
sink64[1] = -math.Abs(y)
}
// Check that it's using integer registers
func abs32(x float32) float32 {
// s390x:"LPDFR",-"LDEBR",-"LEDBR" (no float64 conversion)
// s390x:"LPDFR" -"LDEBR" -"LEDBR" (no float64 conversion)
return float32(math.Abs(float64(x)))
}
// Check that it's using integer registers
func copysign(a, b, c float64) {
// amd64:"BTRQ\t[$]63","ANDQ","ORQ"
// amd64:"BTRQ [$]63" "ANDQ" "ORQ"
// loong64:"FCOPYSGD"
// s390x:"CPSDR",-"MOVD" (no integer load/store)
// s390x:"CPSDR" -"MOVD" (no integer load/store)
// ppc64x:"FCPSGN"
// riscv64:"FSGNJD"
// wasm:"F64Copysign"
sink64[0] = math.Copysign(a, b)
// amd64:"BTSQ\t[$]63"
// amd64:"BTSQ [$]63"
// loong64:"FCOPYSGD"
// s390x:"LNDFR\t",-"MOVD\t" (no integer load/store)
// s390x:"LNDFR " -"MOVD " (no integer load/store)
// ppc64x:"FCPSGN"
// riscv64:"FSGNJD"
// arm64:"ORR", -"AND"
@@ -122,12 +122,12 @@ func copysign(a, b, c float64) {
// Like math.Copysign(c, -1), but with integer operations. Useful
// for platforms that have a copysign opcode to see if it's detected.
// s390x:"LNDFR\t",-"MOVD\t" (no integer load/store)
// s390x:"LNDFR " -"MOVD " (no integer load/store)
sink64[2] = math.Float64frombits(math.Float64bits(a) | 1<<63)
// amd64:"ANDQ","ORQ"
// amd64:"ANDQ" "ORQ"
// loong64:"FCOPYSGD"
// s390x:"CPSDR\t",-"MOVD\t" (no integer load/store)
// s390x:"CPSDR " -"MOVD " (no integer load/store)
// ppc64x:"FCPSGN"
// riscv64:"FSGNJD"
sink64[3] = math.Copysign(-1, c)
@@ -151,12 +151,12 @@ func fms(x, y, z float64) float64 {
}
func fnms(x, y, z float64) float64 {
// riscv64:"FNMSUBD",-"FNMADDD"
// riscv64:"FNMSUBD" -"FNMADDD"
return math.FMA(-x, y, z)
}
func fnma(x, y, z float64) float64 {
// riscv64:"FNMADDD",-"FNMSUBD"
// riscv64:"FNMADDD" -"FNMSUBD"
return math.FMA(x, -y, -z)
}
@@ -221,39 +221,39 @@ func isNotNegInfCmp(x float64) bool {
}
func fromFloat64(f64 float64) uint64 {
// amd64:"MOVQ\tX.*, [^X].*"
// arm64:"FMOVD\tF.*, R.*"
// loong64:"MOVV\tF.*, R.*"
// amd64:"MOVQ X.*, [^X].*"
// arm64:"FMOVD F.*, R.*"
// loong64:"MOVV F.*, R.*"
// ppc64x:"MFVSRD"
// mips64/hardfloat:"MOVV\tF.*, R.*"
// mips64/hardfloat:"MOVV F.*, R.*"
// riscv64:"FMVXD"
return math.Float64bits(f64+1) + 1
}
func fromFloat32(f32 float32) uint32 {
// amd64:"MOVL\tX.*, [^X].*"
// arm64:"FMOVS\tF.*, R.*"
// loong64:"MOVW\tF.*, R.*"
// mips64/hardfloat:"MOVW\tF.*, R.*"
// amd64:"MOVL X.*, [^X].*"
// arm64:"FMOVS F.*, R.*"
// loong64:"MOVW F.*, R.*"
// mips64/hardfloat:"MOVW F.*, R.*"
// riscv64:"FMVXW"
return math.Float32bits(f32+1) + 1
}
func toFloat64(u64 uint64) float64 {
// amd64:"MOVQ\t[^X].*, X.*"
// arm64:"FMOVD\tR.*, F.*"
// loong64:"MOVV\tR.*, F.*"
// amd64:"MOVQ [^X].*, X.*"
// arm64:"FMOVD R.*, F.*"
// loong64:"MOVV R.*, F.*"
// ppc64x:"MTVSRD"
// mips64/hardfloat:"MOVV\tR.*, F.*"
// mips64/hardfloat:"MOVV R.*, F.*"
// riscv64:"FMVDX"
return math.Float64frombits(u64+1) + 1
}
func toFloat32(u32 uint32) float32 {
// amd64:"MOVL\t[^X].*, X.*"
// arm64:"FMOVS\tR.*, F.*"
// loong64:"MOVW\tR.*, F.*"
// mips64/hardfloat:"MOVW\tR.*, F.*"
// amd64:"MOVL [^X].*, X.*"
// arm64:"FMOVS R.*, F.*"
// loong64:"MOVW R.*, F.*"
// mips64/hardfloat:"MOVW R.*, F.*"
// riscv64:"FMVWX"
return math.Float32frombits(u32+1) + 1
}
@@ -262,14 +262,14 @@ func toFloat32(u32 uint32) float32 {
// are evaluated at compile-time
func constantCheck64() bool {
// amd64:"(MOVB\t[$]0)|(XORL\t[A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)",-"FCMP",-"MOVB\t[$]1"
// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
// amd64:"(MOVB [$]0)|(XORL [A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)" -"FCMP" -"MOVB [$]1"
// s390x:"MOV(B|BZ|D) [$]0," -"FCMPU" -"MOV(B|BZ|D) [$]1,"
return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
}
func constantCheck32() bool {
// amd64:"MOV(B|L)\t[$]1",-"FCMP",-"MOV(B|L)\t[$]0"
// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
// amd64:"MOV(B|L) [$]1" -"FCMP" -"MOV(B|L) [$]0"
// s390x:"MOV(B|BZ|D) [$]1," -"FCMPU" -"MOV(B|BZ|D) [$]0,"
return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
}
@@ -277,12 +277,12 @@ func constantCheck32() bool {
// at compile-time
func constantConvert32(x float32) float32 {
// amd64:"MOVSS\t[$]f32.3f800000\\(SB\\)"
// s390x:"FMOVS\t[$]f32.3f800000\\(SB\\)"
// ppc64x/power8:"FMOVS\t[$]f32.3f800000\\(SB\\)"
// ppc64x/power9:"FMOVS\t[$]f32.3f800000\\(SB\\)"
// ppc64x/power10:"XXSPLTIDP\t[$]1065353216, VS0"
// arm64:"FMOVS\t[$]\\(1.0\\)"
// amd64:"MOVSS [$]f32.3f800000\\(SB\\)"
// s390x:"FMOVS [$]f32.3f800000\\(SB\\)"
// ppc64x/power8:"FMOVS [$]f32.3f800000\\(SB\\)"
// ppc64x/power9:"FMOVS [$]f32.3f800000\\(SB\\)"
// ppc64x/power10:"XXSPLTIDP [$]1065353216, VS0"
// arm64:"FMOVS [$]\\(1.0\\)"
if x > math.Float32frombits(0x3f800000) {
return -x
}
@@ -343,7 +343,7 @@ func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint6
u32[0] = uint32(two41())
// on arm64, this uses an explicit <0 comparison, so it constant folds.
// on amd64, this uses an explicit <0 comparison, so it constant folds.
// amd64: "MOVL\t[$]0,"
// amd64: "MOVL [$]0,"
u32[1] = uint32(minus1())
// arm64: "FCVTZSD"
// amd64: "CVTTSD2SQ"
@@ -356,7 +356,7 @@ func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint6
u64[0] = uint64(two81())
// arm64: "FCVTZUD"
// on amd64, this uses an explicit <0 comparison, so it constant folds.
// amd64: "MOVQ\t[$]0,"
// amd64: "MOVQ [$]0,"
u64[1] = uint64(minus1())
}