// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"fmt"
	"internal/testenv"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"testing"
)

// TestAssembly checks to make sure the assembly generated for
// functions contains certain expected instructions.
func TestAssembly(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	if runtime.GOOS == "windows" {
		// TODO: remove if we can get "go tool compile -S" to work on windows.
		t.Skipf("skipping test: recursive windows compile not working")
	}
	dir, err := ioutil.TempDir("", "TestAssembly")
	if err != nil {
		t.Fatalf("could not create directory: %v", err)
	}
	defer os.RemoveAll(dir)

	t.Run("platform", func(t *testing.T) {
		for _, ats := range allAsmTests {
			ats := ats
			t.Run(ats.os+"/"+ats.arch, func(tt *testing.T) {
				tt.Parallel()

				asm := ats.compileToAsm(tt, dir)

				for i, at := range ats.tests {
					fa := funcAsm(asm, i)
					at.verifyAsm(tt, fa)
				}
			})
		}
	})
}

// funcAsm returns the assembly listing for f{funcIndex}.
func funcAsm(asm string, funcIndex int) string {
	if i := strings.Index(asm, fmt.Sprintf("TEXT\t\"\".f%d(SB)", funcIndex)); i >= 0 {
		asm = asm[i:]
	}

	if i := strings.Index(asm, fmt.Sprintf("TEXT\t\"\".f%d(SB)", funcIndex+1)); i >= 0 {
		asm = asm[:i+1]
	}

	return asm
}
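
// In the -S output, each function's listing begins with a header of roughly
// the form
//
//	"".f0 t=1 size=... args=... locals=...
//		0x0000 00000 (test.go:3)	TEXT	"".f0(SB), ...
//
// (illustrative; exact fields vary by compiler version), so funcAsm slices
// the listing from the TEXT line of f{funcIndex} up to the TEXT line of
// f{funcIndex+1}, if present.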

type asmTest struct {
	// function to compile, must be named fX,
	// where X is this test's index in asmTests.tests.
	function string
	// regexps that must match the generated assembly
	regexps []string
}
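
// For example, the first amd64 entry below checks that multiplication by 64
// compiles to a shift:
//
//	{
//		`
//		func f0(x int) int {
//			return x * 64
//		}
//		`,
//		[]string{"\tSHLQ\t\\$6,"},
//	}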

func (at asmTest) verifyAsm(t *testing.T, fa string) {
	for _, r := range at.regexps {
		if b, err := regexp.MatchString(r, fa); !b || err != nil {
			t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.function, fa)
		}
	}
}

type asmTests struct {
	arch    string
	os      string
	imports []string
	tests   []*asmTest
}

func (ats *asmTests) generateCode() []byte {
	var buf bytes.Buffer
	fmt.Fprintln(&buf, "package main")
	for _, s := range ats.imports {
		fmt.Fprintf(&buf, "import %q\n", s)
	}

	for _, t := range ats.tests {
		fmt.Fprintln(&buf, t.function)
	}

	return buf.Bytes()
}
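
// For the amd64 table below, the generated source begins, roughly:
//
//	package main
//	import "encoding/binary"
//	func f0(x int) int {
//		return x * 64
//	}
//	...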

// compileToAsm generates the test source, compiles it for ats.arch/ats.os,
// and returns the generated assembly. dir is a scratch directory.
func (ats *asmTests) compileToAsm(t *testing.T, dir string) string {
	// Create the test directory.
	testDir := filepath.Join(dir, fmt.Sprintf("%s_%s", ats.arch, ats.os))
	err := os.Mkdir(testDir, 0700)
	if err != nil {
		t.Fatalf("could not create directory: %v", err)
	}

	// Create source.
	src := filepath.Join(testDir, "test.go")
	err = ioutil.WriteFile(src, ats.generateCode(), 0600)
	if err != nil {
		t.Fatalf("error writing code: %v", err)
	}

	// First, install any dependencies we need. This builds the required export data
	// for any packages that are imported.
	for _, i := range ats.imports {
		out := filepath.Join(testDir, i+".a")
		if s := ats.runGo(t, "build", "-o", out, "-gcflags=-dolinkobj", i); s != "" {
			t.Fatalf("Stdout = %s\nWant empty", s)
		}
	}

	// Now, compile the individual file for which we want to see the generated assembly.
	asm := ats.runGo(t, "tool", "compile", "-I", testDir, "-S", "-o", filepath.Join(testDir, "out.o"), src)

	// Get rid of code for "".init. Also gets rid of type algorithms & other junk.
	if i := strings.Index(asm, "\n\"\".init "); i >= 0 {
		asm = asm[:i+1]
	}

	return asm
}
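
// The compile step above is roughly equivalent to running
//
//	GOOS=<os> GOARCH=<arch> go tool compile -I <testDir> -S -o out.o test.go
//
// and capturing its standard output.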

// runGo runs the go command with the given args and returns its stdout.
// The command runs with GOARCH and GOOS set to ats.arch and ats.os, respectively.
func (ats *asmTests) runGo(t *testing.T, args ...string) string {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(testenv.GoToolPath(t), args...)
	cmd.Env = mergeEnvLists([]string{"GOARCH=" + ats.arch, "GOOS=" + ats.os}, os.Environ())
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		t.Fatalf("error running cmd: %v", err)
	}

	if s := stderr.String(); s != "" {
		t.Fatalf("Stderr = %s\nWant empty", s)
	}

	return stdout.String()
}

var allAsmTests = []*asmTests{
	{
		arch:    "amd64",
		os:      "linux",
		imports: []string{"encoding/binary"},
		tests:   linuxAMD64Tests,
	},
	{
		arch:    "386",
		os:      "linux",
		imports: []string{"encoding/binary"},
		tests:   linux386Tests,
	},
	{
		arch:    "s390x",
		os:      "linux",
		imports: []string{"encoding/binary"},
		tests:   linuxS390XTests,
	},
	{
		arch:  "arm",
		os:    "linux",
		tests: linuxARMTests,
	},
	{
		arch:  "arm64",
		os:    "linux",
		tests: linuxARM64Tests,
	},
}

var linuxAMD64Tests = []*asmTest{
	{
		`
		func f0(x int) int {
			return x * 64
		}
		`,
		[]string{"\tSHLQ\t\\$6,"},
	},
	{
		`
		func f1(x int) int {
			return x * 96
		}
		`,
		[]string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
	},
	// Load-combining tests.
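	// The binary.LittleEndian/BigEndian accessors read and write bytes one
	// at a time in the source; the tests below check that the compiler
	// combines those byte operations into single wide loads and stores
	// (plus a byte swap for the big-endian cases).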
	{
		`
		func f2(b []byte) uint64 {
			return binary.LittleEndian.Uint64(b)
		}
		`,
		[]string{"\tMOVQ\t\\(.*\\),"},
	},
	{
		`
		func f3(b []byte, i int) uint64 {
			return binary.LittleEndian.Uint64(b[i:])
		}
		`,
		[]string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f4(b []byte) uint32 {
			return binary.LittleEndian.Uint32(b)
		}
		`,
		[]string{"\tMOVL\t\\(.*\\),"},
	},
	{
		`
		func f5(b []byte, i int) uint32 {
			return binary.LittleEndian.Uint32(b[i:])
		}
		`,
		[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f6(b []byte) uint64 {
			return binary.BigEndian.Uint64(b)
		}
		`,
		[]string{"\tBSWAPQ\t"},
	},
	{
		`
		func f7(b []byte, i int) uint64 {
			return binary.BigEndian.Uint64(b[i:])
		}
		`,
		[]string{"\tBSWAPQ\t"},
	},
	{
		`
		func f8(b []byte, v uint64) {
			binary.BigEndian.PutUint64(b, v)
		}
		`,
		[]string{"\tBSWAPQ\t"},
	},
	{
		`
		func f9(b []byte, i int, v uint64) {
			binary.BigEndian.PutUint64(b[i:], v)
		}
		`,
[]string{"\tBSWAPQ\t"},
},
	{
		`
		func f10(b []byte) uint32 {
			return binary.BigEndian.Uint32(b)
		}
		`,
		[]string{"\tBSWAPL\t"},
	},
	{
		`
		func f11(b []byte, i int) uint32 {
			return binary.BigEndian.Uint32(b[i:])
		}
		`,
		[]string{"\tBSWAPL\t"},
	},
	{
		`
		func f12(b []byte, v uint32) {
			binary.BigEndian.PutUint32(b, v)
		}
		`,
[]string{"\tBSWAPL\t"},
},
{
`
func f13(b []byte, i int, v uint32) {
binary.BigEndian.PutUint32(b[i:], v)
}
`,
[]string{"\tBSWAPL\t"},
},
{
`
func f14(b []byte) uint16 {
return binary.BigEndian.Uint16(b)
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f15(b []byte, i int) uint16 {
return binary.BigEndian.Uint16(b[i:])
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f16(b []byte, v uint16) {
binary.BigEndian.PutUint16(b, v)
}
`,
[]string{"\tROLW\t\\$8,"},
},
{
`
func f17(b []byte, i int, v uint16) {
binary.BigEndian.PutUint16(b[i:], v)
}
`,
[]string{"\tROLW\t\\$8,"},
},
// Structure zeroing. See issue #18370.
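	// *t = T{} should lower to one MOVQ $0 store per word of the struct;
	// when the struct contains pointers (T2 below), the write barrier call
	// must still be emitted.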
	{
		`
		type T1 struct {
			a, b, c int
		}
		func f18(t *T1) {
			*t = T1{}
		}
		`,
		[]string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"},
	},
	// TODO: add a test for *t = T{3,4,5} when we fix that.
	// Also test struct containing pointers (this was special because of write barriers).
	{
		`
		type T2 struct {
			a, b, c *int
		}
		func f19(t *T2) {
			*t = T2{}
		}
		`,
		[]string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.writebarrierptr\\(SB\\)"},
	},
	// Rotate tests.
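	// The shift pair x<<7 | x>>(width-7) is the canonical rotate-left-by-7
	// idiom; the cases below check that the compiler recognizes it, and the
	// + and ^ variants, at each operand width.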
	{
		`
		func f20(x uint64) uint64 {
			return x<<7 | x>>57
		}
		`,
		[]string{"\tROLQ\t[$]7,"},
	},
	{
		`
		func f21(x uint64) uint64 {
			return x<<7 + x>>57
		}
		`,
		[]string{"\tROLQ\t[$]7,"},
	},
	{
		`
		func f22(x uint64) uint64 {
			return x<<7 ^ x>>57
		}
		`,
		[]string{"\tROLQ\t[$]7,"},
	},
	{
		`
		func f23(x uint32) uint32 {
			return x<<7 + x>>25
		}
		`,
		[]string{"\tROLL\t[$]7,"},
	},
	{
		`
		func f24(x uint32) uint32 {
			return x<<7 | x>>25
		}
		`,
		[]string{"\tROLL\t[$]7,"},
	},
	{
		`
		func f25(x uint32) uint32 {
			return x<<7 ^ x>>25
		}
		`,
		[]string{"\tROLL\t[$]7,"},
	},
	{
		`
		func f26(x uint16) uint16 {
			return x<<7 + x>>9
		}
		`,
		[]string{"\tROLW\t[$]7,"},
	},
	{
		`
		func f27(x uint16) uint16 {
			return x<<7 | x>>9
		}
		`,
		[]string{"\tROLW\t[$]7,"},
	},
	{
		`
		func f28(x uint16) uint16 {
			return x<<7 ^ x>>9
		}
		`,
		[]string{"\tROLW\t[$]7,"},
	},
	{
		`
		func f29(x uint8) uint8 {
			return x<<7 + x>>1
		}
		`,
		[]string{"\tROLB\t[$]7,"},
	},
	{
		`
		func f30(x uint8) uint8 {
			return x<<7 | x>>1
		}
		`,
		[]string{"\tROLB\t[$]7,"},
	},
	{
		`
		func f31(x uint8) uint8 {
			return x<<7 ^ x>>1
		}
		`,
		[]string{"\tROLB\t[$]7,"},
	},
	// Rotate after inlining (see issue 18254).
	{
		`
		func g(x uint32, k uint) uint32 {
			return x<<k | x>>(32-k)
		}
		func f32(x uint32) uint32 {
			return g(x, 7)
		}
		`,
		[]string{"\tROLL\t[$]7,"},
	},
	// Direct use of constants in fast map access calls. Issue 19015.
	{
		`
		func f33(m map[int]int) int {
			return m[5]
		}
		`,
		[]string{"\tMOVQ\t[$]5,"},
	},
	{
		`
		func f34(m map[int]int) bool {
			_, ok := m[5]
			return ok
		}
		`,
		[]string{"\tMOVQ\t[$]5,"},
	},
	{
		`
		func f35(m map[string]int) int {
			return m["abc"]
		}
		`,
		[]string{"\"abc\""},
	},
	{
		`
		func f36(m map[string]int) bool {
			_, ok := m["abc"]
			return ok
		}
		`,
		[]string{"\"abc\""},
	},
}

var linux386Tests = []*asmTest{
	{
		`
		func f0(b []byte) uint32 {
			return binary.LittleEndian.Uint32(b)
		}
		`,
		[]string{"\tMOVL\t\\(.*\\),"},
	},
	{
		`
		func f1(b []byte, i int) uint32 {
			return binary.LittleEndian.Uint32(b[i:])
		}
		`,
		[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
	},
}

var linuxS390XTests = []*asmTest{
	{
		`
		func f0(b []byte) uint32 {
			return binary.LittleEndian.Uint32(b)
		}
		`,
		[]string{"\tMOVWBR\t\\(.*\\),"},
	},
	{
		`
		func f1(b []byte, i int) uint32 {
			return binary.LittleEndian.Uint32(b[i:])
		}
		`,
		[]string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f2(b []byte) uint64 {
			return binary.LittleEndian.Uint64(b)
		}
		`,
		[]string{"\tMOVDBR\t\\(.*\\),"},
	},
	{
		`
		func f3(b []byte, i int) uint64 {
			return binary.LittleEndian.Uint64(b[i:])
		}
		`,
		[]string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f4(b []byte) uint32 {
			return binary.BigEndian.Uint32(b)
		}
		`,
		[]string{"\tMOVWZ\t\\(.*\\),"},
	},
	{
		`
		func f5(b []byte, i int) uint32 {
			return binary.BigEndian.Uint32(b[i:])
		}
		`,
		[]string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f6(b []byte) uint64 {
			return binary.BigEndian.Uint64(b)
		}
		`,
		[]string{"\tMOVD\t\\(.*\\),"},
	},
	{
		`
		func f7(b []byte, i int) uint64 {
			return binary.BigEndian.Uint64(b[i:])
		}
		`,
		[]string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"},
	},
	{
		`
		func f8(x uint64) uint64 {
			return x<<7 + x>>57
		}
		`,
		[]string{"\tRLLG\t[$]7,"},
	},
	{
		`
		func f9(x uint64) uint64 {
			return x<<7 | x>>57
		}
		`,
		[]string{"\tRLLG\t[$]7,"},
	},
	{
		`
		func f10(x uint64) uint64 {
			return x<<7 ^ x>>57
		}
		`,
		[]string{"\tRLLG\t[$]7,"},
	},
	{
		`
		func f11(x uint32) uint32 {
			return x<<7 + x>>25
		}
		`,
		[]string{"\tRLL\t[$]7,"},
	},
	{
		`
		func f12(x uint32) uint32 {
			return x<<7 | x>>25
		}
		`,
		[]string{"\tRLL\t[$]7,"},
	},
	{
		`
		func f13(x uint32) uint32 {
			return x<<7 ^ x>>25
		}
		`,
		[]string{"\tRLL\t[$]7,"},
	},
}

var linuxARMTests = []*asmTest{
	{
		`
		func f0(x uint32) uint32 {
			return x<<7 + x>>25
		}
		`,
		[]string{"\tMOVW\tR[0-9]+@>25,"},
	},
	{
		`
		func f1(x uint32) uint32 {
			return x<<7 | x>>25
		}
		`,
		[]string{"\tMOVW\tR[0-9]+@>25,"},
	},
	{
		`
		func f2(x uint32) uint32 {
			return x<<7 ^ x>>25
		}
		`,
		[]string{"\tMOVW\tR[0-9]+@>25,"},
	},
}

var linuxARM64Tests = []*asmTest{
	{
		`
		func f0(x uint64) uint64 {
			return x<<7 + x>>57
		}
		`,
		[]string{"\tROR\t[$]57,"},
	},
	{
		`
		func f1(x uint64) uint64 {
			return x<<7 | x>>57
		}
		`,
		[]string{"\tROR\t[$]57,"},
	},
	{
		`
		func f2(x uint64) uint64 {
			return x<<7 ^ x>>57
		}
		`,
		[]string{"\tROR\t[$]57,"},
	},
	{
		`
		func f3(x uint32) uint32 {
			return x<<7 + x>>25
		}
		`,
		[]string{"\tRORW\t[$]25,"},
	},
	{
		`
		func f4(x uint32) uint32 {
			return x<<7 | x>>25
		}
		`,
		[]string{"\tRORW\t[$]25,"},
	},
	{
		`
		func f5(x uint32) uint32 {
			return x<<7 ^ x>>25
		}
		`,
		[]string{"\tRORW\t[$]25,"},
	},
}

// mergeEnvLists merges the two environment lists such that
// variables with the same name in "in" replace those in "out".
// This always returns a newly allocated slice.
func mergeEnvLists(in, out []string) []string {
	out = append([]string(nil), out...)
NextVar:
	for _, inkv := range in {
		k := strings.SplitAfterN(inkv, "=", 2)[0]
		for i, outkv := range out {
			if strings.HasPrefix(outkv, k) {
				out[i] = inkv
				continue NextVar
			}
		}
		out = append(out, inkv)
	}
	return out
}
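
// For example (illustrative values):
//
//	mergeEnvLists([]string{"GOARCH=arm"}, []string{"GOARCH=amd64", "GOOS=linux"})
//
// returns []string{"GOARCH=arm", "GOOS=linux"}: the GOARCH entry from "in"
// replaces the one in "out", and GOOS is carried through unchanged.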

// TestLineNumber checks to make sure the generated assembly has line numbers.
// See issue #16214.
func TestLineNumber(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	dir, err := ioutil.TempDir("", "TestLineNumber")
	if err != nil {
		t.Fatalf("could not create directory: %v", err)
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "x.go")
	err = ioutil.WriteFile(src, []byte(issue16214src), 0644)
	if err != nil {
		t.Fatalf("could not write file: %v", err)
	}

	cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("failed to run go tool compile: %v", err)
	}

	if strings.Contains(string(out), "unknown line number") {
		t.Errorf("line number missing in assembly:\n%s", out)
	}
}

var issue16214src = `
package main

func Mod32(x uint32) uint32 {
	return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos
}
`