go/src/cmd/compile/internal/gc/asm_test.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
"bytes"
"fmt"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
)
// TestAssembly checks to make sure the assembly generated for
// functions contains certain expected instructions.
func TestAssembly(t *testing.T) {
testenv.MustHaveGoBuild(t)
if runtime.GOOS == "windows" {
// TODO: remove if we can get "go tool compile -S" to work on windows.
t.Skipf("skipping test: recursive windows compile not working")
}
dir, err := ioutil.TempDir("", "TestAssembly")
if err != nil {
t.Fatalf("could not create directory: %v", err)
}
defer os.RemoveAll(dir)
nameRegexp := regexp.MustCompile("func \\w+")
t.Run("platform", func(t *testing.T) {
for _, ats := range allAsmTests {
ats := ats
t.Run(ats.os+"/"+ats.arch, func(tt *testing.T) {
tt.Parallel()
asm := ats.compileToAsm(tt, dir)
for _, at := range ats.tests {
funcName := nameRegexp.FindString(at.function)[len("func "):]
fa := funcAsm(tt, asm, funcName)
if fa != "" {
at.verifyAsm(tt, fa)
}
}
})
}
})
}
var nextTextRegexp = regexp.MustCompile(`\n\S`)
// funcAsm returns the assembly listing for the given function name.
func funcAsm(t *testing.T, asm string, funcName string) string {
if i := strings.Index(asm, fmt.Sprintf("TEXT\t\"\".%s(SB)", funcName)); i >= 0 {
asm = asm[i:]
} else {
t.Errorf("could not find assembly for function %v", funcName)
return ""
}
// Find the next line that doesn't begin with whitespace.
loc := nextTextRegexp.FindStringIndex(asm)
if loc != nil {
asm = asm[:loc[0]]
}
return asm
}
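// For reference, a sketch of the listing shape that funcAsm isolates (the
// exact -S output format varies between releases; offsets abbreviated):
//
//	"".f0 t=1 size=20 args=0x10 locals=0x0
//		0x0000 00000 (test.go:5)	TEXT	"".f0(SB), $0-16
//		0x0000 00000 (test.go:6)	SHLQ	$6, AX
//	"".f1 t=1 size=24 args=0x10 locals=0x0
//
// The returned string starts at the matched TEXT directive and ends just
// before the next line that begins in column one (the next function header).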
type asmTest struct {
// function to compile, must be named fX,
// where X is this test's index in asmTests.tests.
function string
// regexps that must match the generated assembly
regexps []string
}
func (at asmTest) verifyAsm(t *testing.T, fa string) {
for _, r := range at.regexps {
if b, err := regexp.MatchString(r, fa); !b || err != nil {
t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.function, fa)
}
}
}
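// Each entry in asmTest.regexps is matched against the whole per-function
// listing with regexp.MatchString, so a literal dollar sign in an instruction
// such as "ROLQ $7, AX" has to be escaped in the table entries below, either
// as "[$]" or as "\\$".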
type asmTests struct {
arch string
os string
imports []string
tests []*asmTest
}
func (ats *asmTests) generateCode() []byte {
var buf bytes.Buffer
fmt.Fprintln(&buf, "package main")
for _, s := range ats.imports {
fmt.Fprintf(&buf, "import %q\n", s)
}
for _, t := range ats.tests {
fmt.Fprintln(&buf, t.function)
}
return buf.Bytes()
}
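// As an illustration, for the amd64 table below generateCode emits a single
// file of roughly this shape (hypothetical excerpt):
//
//	package main
//	import "encoding/binary"
//	import "math/bits"
//	import "unsafe"
//	func f0(x int) int {
//		return x * 64
//	}
//	// ... one function per asmTest entry ...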
// compileToAsm compiles the generated test code for the architecture and OS
// described by ats and returns the generated assembly. dir is a scratch directory.
func (ats *asmTests) compileToAsm(t *testing.T, dir string) string {
// create test directory
testDir := filepath.Join(dir, fmt.Sprintf("%s_%s", ats.arch, ats.os))
err := os.Mkdir(testDir, 0700)
if err != nil {
t.Fatalf("could not create directory: %v", err)
}
// Create source.
src := filepath.Join(testDir, "test.go")
err = ioutil.WriteFile(src, ats.generateCode(), 0600)
if err != nil {
t.Fatalf("error writing code: %v", err)
}
// First, install any dependencies we need. This builds the required export data
// for any packages that are imported.
for _, i := range ats.imports {
out := filepath.Join(testDir, i+".a")
if s := ats.runGo(t, "build", "-o", out, "-gcflags=-dolinkobj=false", i); s != "" {
t.Fatalf("Stdout = %s\nWant empty", s)
}
}
// Now, compile the individual file for which we want to see the generated assembly.
asm := ats.runGo(t, "tool", "compile", "-I", testDir, "-S", "-o", filepath.Join(testDir, "out.o"), src)
return asm
}
// runGo runs the go command with the given args and returns its stdout as a string.
// The go command is run with GOARCH and GOOS set to ats.arch and ats.os, respectively.
func (ats *asmTests) runGo(t *testing.T, args ...string) string {
var stdout, stderr bytes.Buffer
cmd := exec.Command(testenv.GoToolPath(t), args...)
cmd.Env = append(os.Environ(), "GOARCH="+ats.arch, "GOOS="+ats.os)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
t.Fatalf("error running cmd: %v\nstdout:\n%sstderr:\n%s\n", err, stdout.String(), stderr.String())
}
if s := stderr.String(); s != "" {
t.Fatalf("Stderr = %s\nWant empty", s)
}
return stdout.String()
}
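// Taken together, compileToAsm and runGo do roughly the equivalent of the
// following shell commands per platform (illustrative; testDir and the
// import list come from the asmTests entry):
//
//	GOOS=linux GOARCH=amd64 go build -o $testDir/encoding/binary.a -gcflags=-dolinkobj=false encoding/binary
//	GOOS=linux GOARCH=amd64 go tool compile -I $testDir -S -o $testDir/out.o $testDir/test.go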
var allAsmTests = []*asmTests{
{
arch: "amd64",
os: "linux",
imports: []string{"encoding/binary", "math/bits", "unsafe"},
tests: linuxAMD64Tests,
},
{
arch: "386",
os: "linux",
imports: []string{"encoding/binary"},
tests: linux386Tests,
},
{
arch: "s390x",
os: "linux",
imports: []string{"encoding/binary", "math/bits"},
tests: linuxS390XTests,
},
{
arch: "arm",
os: "linux",
imports: []string{"math/bits"},
tests: linuxARMTests,
},
{
arch: "arm64",
os: "linux",
imports: []string{"math/bits"},
tests: linuxARM64Tests,
},
{
arch: "mips",
os: "linux",
imports: []string{"math/bits"},
tests: linuxMIPSTests,
},
{
arch: "ppc64le",
os: "linux",
tests: linuxPPC64LETests,
},
}
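// To extend coverage to another platform, append an entry of this shape
// (hypothetical names) and define the matching test table alongside the
// ones below:
//
//	{
//		arch:    "mips64",
//		os:      "linux",
//		imports: []string{"math/bits"},
//		tests:   linuxMIPS64Tests,
//	},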
var linuxAMD64Tests = []*asmTest{
{
`
func f0(x int) int {
return x * 64
}
`,
[]string{"\tSHLQ\t\\$6,"},
},
{
`
func f1(x int) int {
return x * 96
}
`,
[]string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
},
// Load-combining tests.
{
`
func f2(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
`,
[]string{"\tMOVQ\t\\(.*\\),"},
},
{
`
func f3(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
`,
[]string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f4(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
[]string{"\tMOVL\t\\(.*\\),"},
},
{
`
func f5(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f6(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
`,
[]string{"\tBSWAPQ\t"},
},
{
`
func f7(b []byte, i int) uint64 {
return binary.BigEndian.Uint64(b[i:])
}
`,
[]string{"\tBSWAPQ\t"},
},
{
`
func f8(b []byte, v uint64) {
binary.BigEndian.PutUint64(b, v)
}
`,
[]string{"\tBSWAPQ\t"},
},
{
`
func f9(b []byte, i int, v uint64) {
binary.BigEndian.PutUint64(b[i:], v)
}
`,
[]string{"\tBSWAPQ\t"},
},
{
`
func f10(b []byte) uint32 {
return binary.BigEndian.Uint32(b)
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f11(b []byte, i int) uint32 {
return binary.BigEndian.Uint32(b[i:])
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f12(b []byte, v uint32) {
binary.BigEndian.PutUint32(b, v)
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f13(b []byte, i int, v uint32) {
binary.BigEndian.PutUint32(b[i:], v)
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f14(b []byte) uint16 {
return binary.BigEndian.Uint16(b)
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f15(b []byte, i int) uint16 {
return binary.BigEndian.Uint16(b[i:])
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f16(b []byte, v uint16) {
binary.BigEndian.PutUint16(b, v)
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f17(b []byte, i int, v uint16) {
binary.BigEndian.PutUint16(b[i:], v)
}
`,
[]string{"\tROLW\t\\$8,"},
},
// Structure zeroing. See issue #18370.
{
`
type T1 struct {
a, b, c int
}
func f18(t *T1) {
*t = T1{}
}
`,
[]string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"},
},
// SSA-able composite literal initialization. Issue 18872.
{
`
type T18872 struct {
a, b, c, d int
}
func f18872(p *T18872) {
*p = T18872{1, 2, 3, 4}
}
`,
[]string{"\tMOVQ\t[$]1", "\tMOVQ\t[$]2", "\tMOVQ\t[$]3", "\tMOVQ\t[$]4"},
},
// Also test struct containing pointers (this was special because of write barriers).
{
`
type T2 struct {
a, b, c *int
}
func f19(t *T2) {
*t = T2{}
}
`,
[]string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.writebarrierptr\\(SB\\)"},
},
// Rotate tests
{
`
func f20(x uint64) uint64 {
return x<<7 | x>>57
}
`,
[]string{"\tROLQ\t[$]7,"},
},
{
`
func f21(x uint64) uint64 {
return x<<7 + x>>57
}
`,
[]string{"\tROLQ\t[$]7,"},
},
{
`
func f22(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
[]string{"\tROLQ\t[$]7,"},
},
{
`
func f23(x uint32) uint32 {
return x<<7 + x>>25
}
`,
[]string{"\tROLL\t[$]7,"},
},
{
`
func f24(x uint32) uint32 {
return x<<7 | x>>25
}
`,
[]string{"\tROLL\t[$]7,"},
},
{
`
func f25(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
[]string{"\tROLL\t[$]7,"},
},
{
`
func f26(x uint16) uint16 {
return x<<7 + x>>9
}
`,
[]string{"\tROLW\t[$]7,"},
},
{
`
func f27(x uint16) uint16 {
return x<<7 | x>>9
}
`,
[]string{"\tROLW\t[$]7,"},
},
{
`
func f28(x uint16) uint16 {
return x<<7 ^ x>>9
}
`,
[]string{"\tROLW\t[$]7,"},
},
{
`
func f29(x uint8) uint8 {
return x<<7 + x>>1
}
`,
[]string{"\tROLB\t[$]7,"},
},
{
`
func f30(x uint8) uint8 {
return x<<7 | x>>1
}
`,
[]string{"\tROLB\t[$]7,"},
},
{
`
func f31(x uint8) uint8 {
return x<<7 ^ x>>1
}
`,
[]string{"\tROLB\t[$]7,"},
},
// Rotate after inlining (see issue 18254).
{
`
func f32(x uint32) uint32 {
return g(x, 7)
}
func g(x uint32, k uint) uint32 {
return x<<k | x>>(32-k)
}
`,
[]string{"\tROLL\t[$]7,"},
},
{
`
func f33(m map[int]int) int {
return m[5]
}
`,
[]string{"\tMOVQ\t[$]5,"},
},
// Direct use of constants in fast map access calls. Issue 19015.
{
`
func f34(m map[int]int) bool {
_, ok := m[5]
return ok
}
`,
[]string{"\tMOVQ\t[$]5,"},
},
{
`
func f35(m map[string]int) int {
return m["abc"]
}
`,
[]string{"\"abc\""},
},
{
`
func f36(m map[string]int) bool {
_, ok := m["abc"]
return ok
}
`,
[]string{"\"abc\""},
},
// Bit test ops on amd64, issue 18943.
{
`
func f37(a, b uint64) int {
if a&(1<<(b&63)) != 0 {
return 1
}
return -1
}
`,
[]string{"\tBTQ\t"},
},
{
`
func f38(a, b uint64) bool {
return a&(1<<(b&63)) != 0
}
`,
[]string{"\tBTQ\t"},
},
{
`
func f39(a uint64) int {
if a&(1<<60) != 0 {
return 1
}
return -1
}
`,
[]string{"\tBTQ\t\\$60"},
},
{
`
func f40(a uint64) bool {
return a&(1<<60) != 0
}
`,
[]string{"\tBTQ\t\\$60"},
},
// Intrinsic tests for math/bits
{
`
func f41(a uint64) int {
return bits.TrailingZeros64(a)
}
`,
[]string{"\tBSFQ\t", "\tMOVL\t\\$64,", "\tCMOVQEQ\t"},
},
{
`
func f42(a uint32) int {
return bits.TrailingZeros32(a)
}
`,
[]string{"\tBSFQ\t", "\tORQ\t[^$]", "\tMOVQ\t\\$4294967296,"},
},
{
`
func f43(a uint16) int {
return bits.TrailingZeros16(a)
}
`,
[]string{"\tBSFQ\t", "\tORQ\t\\$65536,"},
},
{
`
func f44(a uint8) int {
return bits.TrailingZeros8(a)
}
`,
[]string{"\tBSFQ\t", "\tORQ\t\\$256,"},
},
{
`
func f45(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
[]string{"\tBSWAPQ\t"},
},
{
`
func f46(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f47(a uint16) uint16 {
return bits.ReverseBytes16(a)
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f48(a uint64) int {
return bits.Len64(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func f49(a uint32) int {
return bits.Len32(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func f50(a uint16) int {
return bits.Len16(a)
}
`,
[]string{"\tBSRQ\t"},
},
/* see ssa.go
{
`
func f51(a uint8) int {
return bits.Len8(a)
}
`,
[]string{"\tBSRQ\t"},
},
*/
{
`
func f52(a uint) int {
return bits.Len(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func f53(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func f54(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func f55(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
[]string{"\tBSRQ\t"},
},
/* see ssa.go
{
`
func f56(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
[]string{"\tBSRQ\t"},
},
*/
{
`
func f57(a uint) int {
return bits.LeadingZeros(a)
}
`,
[]string{"\tBSRQ\t"},
},
{
`
func pop1(x uint64) int {
return bits.OnesCount64(x)
}`,
[]string{"\tPOPCNTQ\t", "support_popcnt"},
},
{
`
func pop2(x uint32) int {
return bits.OnesCount32(x)
}`,
[]string{"\tPOPCNTL\t", "support_popcnt"},
},
{
`
func pop3(x uint16) int {
return bits.OnesCount16(x)
}`,
[]string{"\tPOPCNTL\t", "support_popcnt"},
},
{
`
func pop4(x uint) int {
return bits.OnesCount(x)
}`,
[]string{"\tPOPCNTQ\t", "support_popcnt"},
},
// see issue 19595.
// We want to merge load+op in f58, but not in f59.
{
`
func f58(p, q *int) {
x := *p
*q += x
}`,
[]string{"\tADDQ\t\\("},
},
{
`
func f59(p, q *int) {
x := *p
for i := 0; i < 10; i++ {
*q += x
}
}`,
[]string{"\tADDQ\t[A-Z]"},
},
// Floating-point strength reduction
{
`
func f60(f float64) float64 {
return f * 2.0
}`,
[]string{"\tADDSD\t"},
},
{
`
func f62(f float64) float64 {
return f / 16.0
}`,
[]string{"\tMULSD\t"},
},
{
`
func f63(f float64) float64 {
return f / 0.125
}`,
[]string{"\tMULSD\t"},
},
{
`
func f64(f float64) float64 {
return f / 0.5
}`,
[]string{"\tADDSD\t"},
},
// Check that compare to constant string uses 2/4/8 byte compares
{
`
func f65(a string) bool {
return a == "xx"
}`,
[]string{"\tCMPW\t[A-Z]"},
},
{
`
func f66(a string) bool {
return a == "xxxx"
}`,
[]string{"\tCMPL\t[A-Z]"},
},
{
`
func f67(a string) bool {
return a == "xxxxxxxx"
}`,
[]string{"\tCMPQ\t[A-Z]"},
},
// Non-constant rotate
{
`func rot64l(x uint64, y int) uint64 {
z := uint(y & 63)
return x << z | x >> (64-z)
}`,
[]string{"\tROLQ\t"},
},
{
`func rot64r(x uint64, y int) uint64 {
z := uint(y & 63)
return x >> z | x << (64-z)
}`,
[]string{"\tRORQ\t"},
},
{
`func rot32l(x uint32, y int) uint32 {
z := uint(y & 31)
return x << z | x >> (32-z)
}`,
[]string{"\tROLL\t"},
},
{
`func rot32r(x uint32, y int) uint32 {
z := uint(y & 31)
return x >> z | x << (32-z)
}`,
[]string{"\tRORL\t"},
},
{
`func rot16l(x uint16, y int) uint16 {
z := uint(y & 15)
return x << z | x >> (16-z)
}`,
[]string{"\tROLW\t"},
},
{
`func rot16r(x uint16, y int) uint16 {
z := uint(y & 15)
return x >> z | x << (16-z)
}`,
[]string{"\tRORW\t"},
},
{
`func rot8l(x uint8, y int) uint8 {
z := uint(y & 7)
return x << z | x >> (8-z)
}`,
[]string{"\tROLB\t"},
},
{
`func rot8r(x uint8, y int) uint8 {
z := uint(y & 7)
return x >> z | x << (8-z)
}`,
[]string{"\tRORB\t"},
},
// Check that array compare uses 2/4/8 byte compares
{
`
func f68(a,b [2]byte) bool {
return a == b
}`,
[]string{"\tCMPW\t[A-Z]"},
},
{
`
func f69(a,b [3]uint16) bool {
return a == b
}`,
[]string{"\tCMPL\t[A-Z]"},
},
{
`
func f70(a,b [15]byte) bool {
return a == b
}`,
[]string{"\tCMPQ\t[A-Z]"},
},
{
`
func f71(a,b unsafe.Pointer) bool { // This was a TODO in mapaccess1_faststr
return *((*[4]byte)(a)) != *((*[4]byte)(b))
}`,
[]string{"\tCMPL\t[A-Z]"},
},
{
// make sure assembly output has matching offset and base register.
`
func f72(a, b int) int {
var x [16]byte // use some frame
_ = x
return b
}
`,
[]string{"b\\+40\\(SP\\)"},
},
}
var linux386Tests = []*asmTest{
{
`
func f0(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
[]string{"\tMOVL\t\\(.*\\),"},
},
{
`
func f1(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
},
}
var linuxS390XTests = []*asmTest{
{
`
func f0(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
[]string{"\tMOVWBR\t\\(.*\\),"},
},
{
`
func f1(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
[]string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f2(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
`,
[]string{"\tMOVDBR\t\\(.*\\),"},
},
{
`
func f3(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
`,
[]string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f4(b []byte) uint32 {
return binary.BigEndian.Uint32(b)
}
`,
[]string{"\tMOVWZ\t\\(.*\\),"},
},
{
`
func f5(b []byte, i int) uint32 {
return binary.BigEndian.Uint32(b[i:])
}
`,
[]string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f6(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
`,
[]string{"\tMOVD\t\\(.*\\),"},
},
{
`
func f7(b []byte, i int) uint64 {
return binary.BigEndian.Uint64(b[i:])
}
`,
[]string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"},
},
{
`
func f8(x uint64) uint64 {
return x<<7 + x>>57
}
`,
[]string{"\tRLLG\t[$]7,"},
},
{
`
func f9(x uint64) uint64 {
return x<<7 | x>>57
}
`,
[]string{"\tRLLG\t[$]7,"},
},
{
`
func f10(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
[]string{"\tRLLG\t[$]7,"},
},
{
`
func f11(x uint32) uint32 {
return x<<7 + x>>25
}
`,
[]string{"\tRLL\t[$]7,"},
},
{
`
func f12(x uint32) uint32 {
return x<<7 | x>>25
}
`,
[]string{"\tRLL\t[$]7,"},
},
{
`
func f13(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
[]string{"\tRLL\t[$]7,"},
},
// Fused multiply-add/sub instructions.
{
`
func f14(x, y, z float64) float64 {
return x * y + z
}
`,
[]string{"\tFMADD\t"},
},
{
`
func f15(x, y, z float64) float64 {
return x * y - z
}
`,
[]string{"\tFMSUB\t"},
},
{
`
func f16(x, y, z float32) float32 {
return x * y + z
}
`,
[]string{"\tFMADDS\t"},
},
{
`
func f17(x, y, z float32) float32 {
return x * y - z
}
`,
[]string{"\tFMSUBS\t"},
},
// Intrinsic tests for math/bits
{
`
func f18(a uint64) int {
return bits.TrailingZeros64(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f19(a uint32) int {
return bits.TrailingZeros32(a)
}
`,
[]string{"\tFLOGR\t", "\tMOVWZ\t"},
},
{
`
func f20(a uint16) int {
return bits.TrailingZeros16(a)
}
`,
[]string{"\tFLOGR\t", "\tOR\t\\$65536,"},
},
{
`
func f21(a uint8) int {
return bits.TrailingZeros8(a)
}
`,
[]string{"\tFLOGR\t", "\tOR\t\\$256,"},
},
// Intrinsic tests for math/bits
{
`
func f22(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
[]string{"\tMOVDBR\t"},
},
{
`
func f23(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
[]string{"\tMOVWBR\t"},
},
{
`
func f24(a uint64) int {
return bits.Len64(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f25(a uint32) int {
return bits.Len32(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f26(a uint16) int {
return bits.Len16(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f27(a uint8) int {
return bits.Len8(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f28(a uint) int {
return bits.Len(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f29(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f30(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f31(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f32(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
[]string{"\tFLOGR\t"},
},
{
`
func f33(a uint) int {
return bits.LeadingZeros(a)
}
`,
[]string{"\tFLOGR\t"},
},
}
var linuxARMTests = []*asmTest{
{
`
func f0(x uint32) uint32 {
return x<<7 + x>>25
}
`,
[]string{"\tMOVW\tR[0-9]+@>25,"},
},
{
`
func f1(x uint32) uint32 {
return x<<7 | x>>25
}
`,
[]string{"\tMOVW\tR[0-9]+@>25,"},
},
{
`
func f2(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
[]string{"\tMOVW\tR[0-9]+@>25,"},
},
{
`
func f3(a uint64) int {
return bits.Len64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f4(a uint32) int {
return bits.Len32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f5(a uint16) int {
return bits.Len16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f6(a uint8) int {
return bits.Len8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f7(a uint) int {
return bits.Len(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f8(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f9(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f10(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f11(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f12(a uint) int {
return bits.LeadingZeros(a)
}
`,
[]string{"\tCLZ\t"},
},
{
// make sure assembly output has matching offset and base register.
`
func f13(a, b int) int {
var x [16]byte // use some frame
_ = x
return b
}
`,
[]string{"b\\+4\\(FP\\)"},
},
}
var linuxARM64Tests = []*asmTest{
{
`
func f0(x uint64) uint64 {
return x<<7 + x>>57
}
`,
[]string{"\tROR\t[$]57,"},
},
{
`
func f1(x uint64) uint64 {
return x<<7 | x>>57
}
`,
[]string{"\tROR\t[$]57,"},
},
{
`
func f2(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
[]string{"\tROR\t[$]57,"},
},
{
`
func f3(x uint32) uint32 {
return x<<7 + x>>25
}
`,
[]string{"\tRORW\t[$]25,"},
},
{
`
func f4(x uint32) uint32 {
return x<<7 | x>>25
}
`,
[]string{"\tRORW\t[$]25,"},
},
{
`
func f5(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
[]string{"\tRORW\t[$]25,"},
},
{
`
func f22(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
[]string{"\tREV\t"},
},
{
`
func f23(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
[]string{"\tREVW\t"},
},
{
`
func f24(a uint64) int {
return bits.Len64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f25(a uint32) int {
return bits.Len32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f26(a uint16) int {
return bits.Len16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f27(a uint8) int {
return bits.Len8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f28(a uint) int {
return bits.Len(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f29(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f30(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f31(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f32(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f33(a uint) int {
return bits.LeadingZeros(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f34(a uint64) uint64 {
return a & ((1<<63)-1)
}
`,
[]string{"\tAND\t"},
},
{
`
func f35(a uint64) uint64 {
return a & (1<<63)
}
`,
[]string{"\tAND\t"},
},
{
// make sure offsets are folded into load and store.
`
func f36(_, a [20]byte) (b [20]byte) {
b = a
return
}
`,
[]string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"},
},
}
var linuxMIPSTests = []*asmTest{
{
`
func f0(a uint64) int {
return bits.Len64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f1(a uint32) int {
return bits.Len32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f2(a uint16) int {
return bits.Len16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f3(a uint8) int {
return bits.Len8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f4(a uint) int {
return bits.Len(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f5(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f6(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f7(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f8(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
[]string{"\tCLZ\t"},
},
{
`
func f9(a uint) int {
return bits.LeadingZeros(a)
}
`,
[]string{"\tCLZ\t"},
},
}
var linuxPPC64LETests = []*asmTest{
// Fused multiply-add/sub instructions.
{
`
func f0(x, y, z float64) float64 {
return x * y + z
}
`,
[]string{"\tFMADD\t"},
},
{
`
func f1(x, y, z float64) float64 {
return x * y - z
}
`,
[]string{"\tFMSUB\t"},
},
{
`
func f2(x, y, z float32) float32 {
return x * y + z
}
`,
[]string{"\tFMADDS\t"},
},
{
`
func f3(x, y, z float32) float32 {
return x * y - z
}
`,
[]string{"\tFMSUBS\t"},
},
{
`
func f4(x uint32) uint32 {
return x<<7 | x>>25
}
`,
[]string{"\tROTLW\t"},
},
{
`
func f5(x uint32) uint32 {
return x<<7 + x>>25
}
`,
[]string{"\tROTLW\t"},
},
{
`
func f6(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
[]string{"\tROTLW\t"},
},
{
`
func f7(x uint64) uint64 {
return x<<7 | x>>57
}
`,
[]string{"\tROTL\t"},
},
{
`
func f8(x uint64) uint64 {
return x<<7 + x>>57
}
`,
[]string{"\tROTL\t"},
},
{
`
func f9(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
[]string{"\tROTL\t"},
},
}
// TestLineNumber checks that the generated assembly has line numbers.
// See issue #16214.
func TestLineNumber(t *testing.T) {
testenv.MustHaveGoBuild(t)
dir, err := ioutil.TempDir("", "TestLineNumber")
if err != nil {
t.Fatalf("could not create directory: %v", err)
}
defer os.RemoveAll(dir)
src := filepath.Join(dir, "x.go")
err = ioutil.WriteFile(src, []byte(issue16214src), 0644)
if err != nil {
t.Fatalf("could not write file: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("fail to run go tool compile: %v", err)
}
if strings.Contains(string(out), "unknown line number") {
t.Errorf("line number missing in assembly:\n%s", out)
}
}
var issue16214src = `
package main
func Mod32(x uint32) uint32 {
return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos
}
`