go/src/cmd/compile/internal/ssa/compile.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"fmt"
	"log"
	"os"
	"regexp"
	"runtime"
	"strings"
	"time"
)

// Compile is the main entry point for this package.
// Compile modifies f so that on return:
// · all Values in f map to 0 or 1 assembly instructions of the target architecture
// · the order of f.Blocks is the order to emit the Blocks
// · the order of b.Values is the order to emit the Values in each Block
// · f has a non-nil regAlloc field
func Compile(f *Func) {
	// TODO: debugging - set flags to control verbosity of compiler,
	// which phases to dump IR before/after, etc.
	if f.Log() {
		f.Logf("compiling %s\n", f.Name)
	}

	// hook to print function & phase if panic happens
	phaseName := "init"
	defer func() {
		if phaseName != "" {
			err := recover()
			stack := make([]byte, 16384)
			n := runtime.Stack(stack, false)
			stack = stack[:n]
			f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
		}
	}()

	// Run all the passes
	printFunc(f)
	f.Config.HTML.WriteFunc("start", f)
	if BuildDump != "" && BuildDump == f.Name {
		f.dumpFile("build")
	}
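	// With -d=ssa/check/on, run the consistency checker on the initial SSA and again after every pass.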
	if checkEnabled {
		checkFunc(f)
	}
	const logMemStats = false
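	// Run the passes in order. When optimization is off, only required passes run;
	// individual passes can also be disabled with -d=ssa/<phase>/off.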
	for _, p := range passes {
		if !f.Config.optimize && !p.required || p.disabled {
			continue
		}
		f.pass = &p
		phaseName = p.name
		if f.Log() {
			f.Logf(" pass %s begin\n", p.name)
		}
		// TODO: capture logging during this pass, add it to the HTML
		var mStart runtime.MemStats
		if logMemStats || p.mem {
			runtime.ReadMemStats(&mStart)
		}
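		// Run the pass and record its wall-clock time.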
		tStart := time.Now()
		p.fn(f)
		tEnd := time.Now()

		// Need something less crude than "Log the whole intermediate result".
		if f.Log() || f.Config.HTML != nil {
			time := tEnd.Sub(tStart).Nanoseconds()
			var stats string
			if logMemStats {
				var mEnd runtime.MemStats
				runtime.ReadMemStats(&mEnd)
				nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
				nAllocs := mEnd.Mallocs - mStart.Mallocs
				stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes)
			} else {
				stats = fmt.Sprintf("[%d ns]", time)
			}
			f.Logf(" pass %s end %s\n", p.name, stats)
			printFunc(f)
			f.Config.HTML.WriteFunc(fmt.Sprintf("after %s <span class=\"stats\">%s</span>", phaseName, stats), f)
		}
		if p.time || p.mem {
			// Surround timing information w/ enough context to allow comparisons.
			time := tEnd.Sub(tStart).Nanoseconds()
			if p.time {
f.LogStat("TIME(ns)", time)
}
if p.mem {
var mEnd runtime.MemStats
runtime.ReadMemStats(&mEnd)
nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
nAllocs := mEnd.Mallocs - mStart.Mallocs
f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
}
}
if p.dump != nil && p.dump[f.Name] {
// Dump function to appropriately named file
f.dumpFile(phaseName)
}
if checkEnabled {
checkFunc(f)
}
}
// Squash error printing defer
phaseName = ""
}

// TODO: should be a config field
var dumpFileSeq int

// dumpFile creates a file from the phase name and function name
// Dumping is done to files to avoid buffering huge strings before
// output.
func (f *Func) dumpFile(phaseName string) {
	dumpFileSeq++
	fname := fmt.Sprintf("%s__%s_%d.dump", phaseName, f.Name, dumpFileSeq)
	fname = strings.Replace(fname, " ", "_", -1)
	fname = strings.Replace(fname, "/", "_", -1)
	fname = strings.Replace(fname, ":", "_", -1)
	fi, err := os.Create(fname)
	if err != nil {
		f.Config.Warnl(0, "Unable to create after-phase dump file %s", fname)
		return
	}
	p := stringFuncPrinter{w: fi}
	fprintFunc(p, f)
	fi.Close()
}

type pass struct {
	name     string          // name of the pass, as used in -d=ssa/<phase>/<flag> options
	fn       func(*Func)     // function implementing the pass
	required bool            // pass must run (and cannot be disabled), even when not optimizing
	disabled bool            // pass has been turned off via a debug option
	time     bool            // report time to run pass
	mem      bool            // report mem stats to run pass
	stats    int             // pass reports own "stats" (e.g., branches removed)
	debug    int             // pass performs some debugging. =1 should be in error-testing-friendly Warnl format.
	test     int             // pass-specific ad-hoc option, perhaps useful in development
	dump     map[string]bool // dump if function name matches
}
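
// addDump records that the named function should be dumped to a file when this pass finishes.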
func (p *pass) addDump(s string) {
	if p.dump == nil {
		p.dump = make(map[string]bool)
	}
	p.dump[s] = true
}

// Run consistency checker between each phase
var checkEnabled = false

// Debug output
var IntrinsicsDebug int
var IntrinsicsDisable bool
var BuildDebug int
var BuildTest int
var BuildStats int
var BuildDump string // name of function to dump after initial build of ssa
// PhaseOption sets the specified flag in the specified ssa phase,
// returning empty string if this was successful or a string explaining
// the error if it was not.
// A version of the phase name with "_" replaced by " " is also checked for a match.
// If the phase name begins with a '~' then the rest of the underscores-replaced-with-blanks
// version is used as a regular expression to match the phase name(s).
//
// Special cases that have turned out to be useful:
// ssa/check/on enables checking after each phase
// ssa/all/time enables time reporting for all phases
//
// See gc/lex.go for dissection of the option string.
// Example uses:
//
// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash
//
// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash
//
func PhaseOption(phase, flag string, val int, valString string) string {
	if phase == "help" {
		lastcr := 0
		phasenames := "check, all, build, intrinsics"
		for _, p := range passes {
			pn := strings.Replace(p.name, " ", "_", -1)
			if len(pn)+len(phasenames)-lastcr > 70 {
				phasenames += "\n"
				lastcr = len(phasenames)
				phasenames += pn
			} else {
				phasenames += ", " + pn
			}
		}
		return "" +
			`GcFlag -d=ssa/<phase>/<flag>[=<value>]|[:<function_name>]
<phase> is one of:
` + phasenames + `
<flag> is one of on, off, debug, mem, time, test, stats, dump
<value> defaults to 1
<function_name> is required for "dump", specifies name of function to dump after <phase>
Except for dump, output is directed to standard out; dump appears in a file.
Phase "all" supports flags "time", "mem", and "dump".
Phase "intrinsics" supports flags "on", "off", and "debug".
Interpretation of the "debug" value depends on the phase.
Dump files are named <phase>__<function_name>_<seq>.dump.
`
	}
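	// "check", "all", "build", and "intrinsics" are handled specially here; they are not entries in the passes list.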
if phase == "check" && flag == "on" {
checkEnabled = val != 0
return ""
}
if phase == "check" && flag == "off" {
checkEnabled = val == 0
return ""
}
alltime := false
allmem := false
alldump := false
if phase == "all" {
if flag == "time" {
alltime = val != 0
} else if flag == "mem" {
allmem = val != 0
} else if flag == "dump" {
alldump = val != 0
if alldump {
BuildDump = valString
}
} else {
return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
}
}
if phase == "intrinsics" {
switch flag {
case "on":
IntrinsicsDisable = val == 0
case "off":
IntrinsicsDisable = val != 0
case "debug":
IntrinsicsDebug = val
default:
return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
}
return ""
}
if phase == "build" {
switch flag {
case "debug":
BuildDebug = val
case "test":
BuildTest = val
case "stats":
BuildStats = val
case "dump":
BuildDump = valString
		default:
			return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
		}
		return ""
	}

	underphase := strings.Replace(phase, "_", " ", -1)
	var re *regexp.Regexp
	if phase[0] == '~' {
		r, ok := regexp.Compile(underphase[1:])
		if ok != nil {
			return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag)
		}
		re = r
	}
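	// Apply the flag to every matching pass: "all" matches every pass; otherwise a pass matches by
	// exact name, by the underscores-replaced-with-blanks form of the name, or by the '~' regular expression.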
	matchedOne := false
	for i, p := range passes {
		if phase == "all" {
			p.time = alltime
			p.mem = allmem
			if alldump {
				p.addDump(valString)
			}
			passes[i] = p
			matchedOne = true
		} else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) {
			switch flag {
			case "on":
				p.disabled = val == 0
			case "off":
				p.disabled = val != 0
			case "time":
				p.time = val != 0
			case "mem":
				p.mem = val != 0
			case "debug":
				p.debug = val
			case "stats":
				p.stats = val
			case "test":
				p.test = val
			case "dump":
				p.addDump(valString)
			default:
				return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
			}
			if p.disabled && p.required {
				return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase)
			}
			passes[i] = p
			matchedOne = true
		}
	}
	if matchedOne {
		return ""
	}
	return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase)
}

// list of passes for the compiler
var passes = [...]pass{
	// TODO: combine phielim and copyelim into a single pass?
	{name: "early phielim", fn: phielim},
	{name: "early copyelim", fn: copyelim},
	{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
	{name: "short circuit", fn: shortcircuit},
	{name: "decompose user", fn: decomposeUser, required: true},
	{name: "opt", fn: opt, required: true},               // TODO: split required rules and optimizing rules
	{name: "zero arg cse", fn: zcse, required: true},     // required to merge OpSB values
	{name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt
	{name: "generic cse", fn: cse},
	{name: "phiopt", fn: phiopt},
	{name: "nilcheckelim", fn: nilcheckelim},
	{name: "prove", fn: prove},
{name: "loopbce", fn: loopbce},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "dec", fn: dec, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "generic deadcode", fn: deadcode},
{name: "check bce", fn: checkbce},
{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
{name: "fuse", fn: fuse},
{name: "dse", fn: dse},
{name: "tighten", fn: tighten}, // move values closer to their uses
{name: "lower", fn: lower, required: true},
{name: "lowered cse", fn: cse},
{name: "lowered deadcode", fn: deadcode, required: true},
{name: "checkLower", fn: checkLower, required: true},
{name: "late phielim", fn: phielim},
{name: "late copyelim", fn: copyelim},
{name: "phi tighten", fn: phiTighten},
{name: "late deadcode", fn: deadcode},
{name: "critical", fn: critical, required: true}, // remove critical edges
{name: "likelyadjust", fn: likelyadjust},
{name: "layout", fn: layout, required: true}, // schedule blocks
{name: "schedule", fn: schedule, required: true}, // schedule values
{name: "late nilcheck", fn: nilcheckelim2},
{name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
{name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
{name: "stackframe", fn: stackframe, required: true},
{name: "trim", fn: trim}, // remove empty blocks
}

// Double-check phase ordering constraints.
// This code is intended to document the ordering requirements
// between different phases. It does not override the passes
// list above.
type constraint struct {
	a, b string // a must come before b
}

var passOrder = [...]constraint{
	// prove relies on common-subexpression elimination for maximum benefits.
	{"generic cse", "prove"},
	// deadcode after prove to eliminate all new dead blocks.
	{"prove", "generic deadcode"},
	// common-subexpression before dead-store elim, so that we recognize
	// when two address expressions are the same.
	{"generic cse", "dse"},
	// cse substantially improves nilcheckelim efficacy
	{"generic cse", "nilcheckelim"},
	// allow deadcode to clean up after nilcheckelim
	{"nilcheckelim", "generic deadcode"},
	// nilcheckelim generates sequences of plain basic blocks
	{"nilcheckelim", "fuse"},
	// nilcheckelim relies on opt to rewrite user nil checks
	{"opt", "nilcheckelim"},
	// tighten should happen before lowering to avoid splitting naturally paired instructions such as CMP/SET
	{"tighten", "lower"},
	// tighten will be most effective when as many values have been removed as possible
	{"generic deadcode", "tighten"},
	{"generic cse", "tighten"},
	// checkbce needs the values removed
	{"generic deadcode", "check bce"},
	// don't run optimization pass until we've decomposed builtin objects
	{"decompose builtin", "late opt"},
	// don't layout blocks until critical edges have been removed
	{"critical", "layout"},
	// regalloc requires the removal of all critical edges
	{"critical", "regalloc"},
	// regalloc requires all the values in a block to be scheduled
	{"schedule", "regalloc"},
	// checkLower must run after lowering & subsequent dead code elim
	{"lower", "checkLower"},
	{"lowered deadcode", "checkLower"},
	// late nilcheck needs instructions to be scheduled.
	{"schedule", "late nilcheck"},
	// flagalloc needs instructions to be scheduled.
	{"schedule", "flagalloc"},
	// regalloc needs flags to be allocated first.
	{"flagalloc", "regalloc"},
	// stackframe needs to know about spilled registers.
	{"regalloc", "stackframe"},
	// trim needs regalloc to be done first.
	{"regalloc", "trim"},
}
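
// init enforces the passOrder constraints: it panics at package initialization
// time if the passes list above violates any of the declared orderings.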
func init() {
	for _, c := range passOrder {
		a, b := c.a, c.b
		i := -1
		j := -1
		for k, p := range passes {
			if p.name == a {
				i = k
			}
			if p.name == b {
				j = k
			}
		}
		if i < 0 {
			log.Panicf("pass %s not found", a)
		}
		if j < 0 {
			log.Panicf("pass %s not found", b)
		}
		if i >= j {
			log.Panicf("passes %s and %s out of order", a, b)
		}
	}
}