// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// The inlining facility makes 2 passes: first caninl determines which
// functions are suitable for inlining, and for those that are it
// saves a copy of the body. Then inlcalls walks each function body to
// expand calls to inlinable functions.
//
// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l disables inlining. Additional levels (beyond -l) may be buggy and
// are not supported.
//      0: disabled
//      1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default)
//      2: (unassigned)
//      3: (unassigned)
//      4: allow non-leaf functions
//
// At some point this may get another default and become switch-offable with -N.
//
// The -d typcheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
//
// The debug['m'] flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not; more -m's are for debugging, and their output may go away at any point.
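//
// For example, these settings are typically exercised through the go command:
//
//	go build -gcflags=-m ./...      // report which calls get inlined
//	go build -gcflags='-m -m' ./... // also report why calls are not inlined
//	go build -gcflags=-l ./...      // disable inlining
//	go build -gcflags=-l=4 ./...    // allow non-leaf (mid-stack) inlining
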
package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
	"fmt"
	"strings"
)

// Inlining budget parameters, gathered in one place
const (
	inlineMaxBudget       = 80
	inlineExtraAppendCost = 0
	inlineExtraCallCost   = inlineMaxBudget // default is do not inline, -l=4 enables by using 1 instead.
	inlineExtraPanicCost  = 1               // do not penalize inlining panics.
	inlineExtraThrowCost  = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.

	inlineBigFunctionNodes   = 5000 // Functions with this many nodes are considered "big".
	inlineBigFunctionMaxCost = 20   // Max cost of inlinee when inlining into a "big" function.
)
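
// Rough illustration of how the budget plays out: a small leaf helper such as
//
//	func min(a, b int) int { // hypothetical example, not part of the compiler
//		if a < b {
//			return a
//		}
//		return b
//	}
//
// costs only a handful of nodes and fits comfortably within inlineMaxBudget,
// whereas a call to a function whose body is not available for inlining is
// charged inlineExtraCallCost, which at the default level equals the whole
// budget and therefore disqualifies the caller.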

// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
func fnpkg(fn *Node) *types.Pkg {
	if fn.IsMethod() {
		// method
		rcvr := fn.Type.Recv().Type

		if rcvr.IsPtr() {
			rcvr = rcvr.Elem()
		}
		if rcvr.Sym == nil {
			Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr)
		}
		return rcvr.Sym.Pkg
	}

	// non-method
	return fn.Sym.Pkg
}

// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
func typecheckinl(fn *Node) {
	lno := setlineno(fn)

	if flagiexport {
		expandInline(fn)
	}

	// typecheckinl is only for imported functions;
	// their bodies may refer to unsafe as long as the package
	// was marked safe during import (which was checked then).
	// the ->inl of a local function has been typechecked before caninl copied it.
	pkg := fnpkg(fn)

	if pkg == localpkg || pkg == nil {
		return // typecheckinl on local function
	}

	if Debug['m'] > 2 || Debug_export != 0 {
		fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
	}

	save_safemode := safemode
	safemode = false

	savefn := Curfn
	Curfn = fn
	typecheckslice(fn.Func.Inl.Body, Etop)
	Curfn = savefn

	// During typechecking, declarations are added to
	// Curfn.Func.Dcl. Move them to Inl.Dcl for consistency with
	// how local functions behave. (Append because typecheckinl
	// may be called multiple times.)
	fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
	fn.Func.Dcl = nil

	safemode = save_safemode

	lineno = lno
}

// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
func caninl(fn *Node) {
	if fn.Op != ODCLFUNC {
		Fatalf("caninl %v", fn)
	}
	if fn.Func.Nname == nil {
		Fatalf("caninl no nname %+v", fn)
	}

	var reason string // reason, if any, that the function was not inlined
	if Debug['m'] > 1 {
		defer func() {
			if reason != "" {
				fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
			}
		}()
	}

	// If marked "go:noinline", don't inline.
	if fn.Func.Pragma&Noinline != 0 {
		reason = "marked go:noinline"
		return
	}

	// If marked "go:norace" and -race compilation, don't inline.
	if flag_race && fn.Func.Pragma&Norace != 0 {
		reason = "marked go:norace with -race compilation"
		return
	}

	// If marked "go:cgo_unsafe_args", don't inline, since the
	// function makes assumptions about its argument frame layout.
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		reason = "marked go:cgo_unsafe_args"
		return
	}

	// The nowritebarrierrec checker currently works at function
	// granularity, so inlining yeswritebarrierrec functions can
	// confuse it (#22342). As a workaround, disallow inlining
	// them for now.
	if fn.Func.Pragma&Yeswritebarrierrec != 0 {
		reason = "marked go:yeswritebarrierrec"
		return
	}

	// If fn has no body (is defined outside of Go), cannot inline it.
	if fn.Nbody.Len() == 0 {
		reason = "no function body"
		return
	}

	if fn.Typecheck() == 0 {
		Fatalf("caninl on non-typechecked function %v", fn)
	}

	n := fn.Func.Nname
	if n.Func.InlinabilityChecked() {
		return
	}
	defer n.Func.SetInlinabilityChecked(true)

	cc := int32(inlineExtraCallCost)
	if Debug['l'] == 4 {
		cc = 1 // this appears to yield better performance than 0.
	}

	// At this point in the game the function we're looking at may
	// have "stale" autos, vars that still appear in the Dcl list, but
	// which no longer have any uses in the function body (due to
	// elimination by deadcode). We'd like to exclude these dead vars
	// when creating the "Inline.Dcl" field below; to accomplish this,
	// the hairyVisitor below builds up a map of used/referenced
	// locals, and we use this map to produce a pruned Inline.Dcl
	// list. See issue 25249 for more context.

	visitor := hairyVisitor{
		budget:        inlineMaxBudget,
		extraCallCost: cc,
		usedLocals:    make(map[*Node]bool),
	}
	if visitor.visitList(fn.Nbody) {
		reason = visitor.reason
		return
	}
	if visitor.budget < 0 {
		reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-visitor.budget, inlineMaxBudget)
		return
	}

	n.Func.Inl = &Inline{
		Cost: inlineMaxBudget - visitor.budget,
		Dcl:  inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
		Body: inlcopylist(fn.Nbody.Slice()),
	}

	// Hack; TODO: find a better way to link method nodes back to the thing with the ->inl.
	// This is so export can find the body of a method.
	fn.Type.FuncType().Nname = asTypesNode(n)

	if Debug['m'] > 1 {
		fmt.Printf("%v: can inline %#v as: %#v { %#v }\n", fn.Line(), n, fn.Type, asNodes(n.Func.Inl.Body))
	} else if Debug['m'] != 0 {
		fmt.Printf("%v: can inline %v\n", fn.Line(), n)
	}
}
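
// Illustration of the diagnostics printed above (positions and the cost are
// made up for the example):
//
//	foo.go:12:6: can inline min
//	foo.go:20:6: cannot inline sum: function too complex: cost 105 exceeds budget 80
//
// The "cannot inline" lines are only printed when -m is given at least twice.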

// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
func inlFlood(n *Node) {
	if n == nil {
		return
	}
	if n.Op != ONAME || n.Class() != PFUNC {
		Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
	}
	if n.Func == nil {
		Fatalf("inlFlood: missing Func on %v", n)
	}
	if n.Func.Inl == nil {
		return
	}

	if n.Func.ExportInline() {
		return
	}
	n.Func.SetExportInline(true)

	typecheckinl(n)

	inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
		switch n.Op {
		case ONAME:
			// Mark any referenced global variables or
			// functions for reexport. Skip methods,
			// because they're reexported alongside their
			// receiver type.
			if n.Class() == PEXTERN || n.Class() == PFUNC && !n.isMethodExpression() {
				exportsym(n)
			}

		case OCALLFUNC, OCALLMETH:
			// Recursively flood any functions called by
			// this one.
			inlFlood(asNode(n.Left.Type.Nname()))
		}
		return true
	})
}

// hairyVisitor visits a function body to determine its inlining
// hairiness and whether or not it can be inlined.
type hairyVisitor struct {
	budget        int32
	reason        string
	extraCallCost int32
	usedLocals    map[*Node]bool
}

// Look for anything we want to punt on.
func (v *hairyVisitor) visitList(ll Nodes) bool {
	for _, n := range ll.Slice() {
		if v.visit(n) {
			return true
		}
	}
	return false
}
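
// visit charges n (and, recursively, its children) against v.budget,
// records referenced PAUTO locals in v.usedLocals, and reports whether it
// found something that rules out inlining of the enclosing function,
// setting v.reason when it does. It also reports true once the budget is
// exhausted, unless -m -m is in effect so that the full cost can still be
// computed.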
func (v *hairyVisitor) visit(n *Node) bool {
	if n == nil {
		return false
	}

	switch n.Op {
	// Call is okay if inlinable and we have the budget for the body.
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			v.budget--
			break
		}
		// Functions that call runtime.getcaller{pc,sp} cannot be inlined
		// because getcaller{pc,sp} expect a pointer to the caller's first argument.
		//
		// runtime.throw is a "cheap call" like panic in normal code.
		if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
			fn := n.Left.Sym.Name
			if fn == "getcallerpc" || fn == "getcallersp" {
				v.reason = "call to " + fn
				return true
			}
			if fn == "throw" {
				v.budget -= inlineExtraThrowCost
				break
			}
		}

		if fn := n.Left.Func; fn != nil && fn.Inl != nil {
			v.budget -= fn.Inl.Cost
			break
		}
		if n.Left.isMethodExpression() {
			if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl != nil {
				v.budget -= d.Func.Inl.Cost
				break
			}
		}
		// TODO(mdempsky): Budget for OCLOSURE calls if we
		// ever allow that. See #15561 and #23093.

		// Call cost for non-leaf inlining.
		v.budget -= v.extraCallCost

	// Call is okay if inlinable and we have the budget for the body.
	case OCALLMETH:
		t := n.Left.Type
		if t == nil {
			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
		}
		if t.Nname() == nil {
			Fatalf("no function definition for [%p] %+v\n", t, t)
		}
		if isRuntimePkg(n.Left.Sym.Pkg) {
			fn := n.Left.Sym.Name
			if fn == "heapBits.nextArena" {
				// Special case: explicitly allow
				// mid-stack inlining of
				// runtime.heapBits.next even though
				// it calls slow-path
				// runtime.heapBits.nextArena.
				//
				// TODO(austin): Once mid-stack
				// inlining is the default, remove
				// this special case.
				break
			}
		}
		if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
			v.budget -= inlfn.Inl.Cost
			break
		}
		// Call cost for non-leaf inlining.
		v.budget -= v.extraCallCost

	// Things that are too hairy, irrespective of the budget.
	case OCALL, OCALLINTER:
		// Call cost for non-leaf inlining.
		v.budget -= v.extraCallCost

	case OPANIC:
		v.budget -= inlineExtraPanicCost

	case ORECOVER:
		// recover matches the argument frame pointer to find
		// the right panic value, so it needs an argument frame.
		v.reason = "call to recover"
		return true

	case OCLOSURE,
		OCALLPART,
		ORANGE,
		OFOR,
		OFORUNTIL,
		OSELECT,
		OTYPESW,
		OPROC,
		ODEFER,
		ODCLTYPE, // can't print yet
		OBREAK,
		ORETJMP:
		v.reason = "unhandled op " + n.Op.String()
		return true

	case OAPPEND:
		v.budget -= inlineExtraAppendCost

	case ODCLCONST, OEMPTY, OFALL, OLABEL:
		// These nodes don't produce code; omit from inlining budget.
		return false

	case OIF:
		if Isconst(n.Left, CTBOOL) {
			// This if and the condition cost nothing.
			return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
				v.visitList(n.Rlist)
		}

	case ONAME:
		if n.Class() == PAUTO {
			v.usedLocals[n] = true
		}
	}

	v.budget--
	// TODO(mdempsky/josharian): Hacks to appease toolstash; remove.
	// See issue 17566 and CL 31674 for discussion.
	switch n.Op {
	case OSTRUCTKEY:
		v.budget--
	case OSLICE, OSLICEARR, OSLICESTR:
		v.budget--
	case OSLICE3, OSLICE3ARR:
		v.budget -= 2
	}

	// When debugging, don't stop early, to get the full cost of inlining this function.
	if v.budget < 0 && Debug['m'] < 2 {
		return true
	}

	return v.visit(n.Left) || v.visit(n.Right) ||
		v.visitList(n.List) || v.visitList(n.Rlist) ||
		v.visitList(n.Ninit) || v.visitList(n.Nbody)
}
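
// For illustration (sum below is hypothetical, not part of the compiler):
// under these rules a body containing a loop, such as
//
//	func sum(xs []int) (s int) {
//		for _, x := range xs {
//			s += x
//		}
//		return
//	}
//
// is rejected outright with an "unhandled op" reason because ORANGE is in
// the list above, while a loop-free function of similar size is merely
// charged a few nodes against the budget.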

// Inlcopy and inlcopylist recursively copy the body of a function.
// Any name-like node of non-local class is marked for re-export by adding it to
// the exportlist.
func inlcopylist(ll []*Node) []*Node {
	s := make([]*Node, 0, len(ll))
	for _, n := range ll {
		s = append(s, inlcopy(n))
	}
	return s
}

func inlcopy(n *Node) *Node {
	if n == nil {
		return nil
	}

	switch n.Op {
	case ONAME, OTYPE, OLITERAL:
		return n
	}

	m := n.copy()
	if m.Func != nil {
		Fatalf("unexpected Func: %v", m)
	}
	m.Left = inlcopy(n.Left)
	m.Right = inlcopy(n.Right)
	m.List.Set(inlcopylist(n.List.Slice()))
	m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
	m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
	m.Nbody.Set(inlcopylist(n.Nbody.Slice()))

	return m
}
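
// countNodes returns the number of nodes in the tree rooted at n, counting
// n itself and everything reachable through Left, Right, Ninit, Nbody, List,
// and Rlist. inlcalls uses it to decide whether a function is "big" (see
// inlineBigFunctionNodes).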
func countNodes(n *Node) int {
	if n == nil {
		return 0
	}
	cnt := 1
	cnt += countNodes(n.Left)
	cnt += countNodes(n.Right)
	for _, n1 := range n.Ninit.Slice() {
		cnt += countNodes(n1)
	}
	for _, n1 := range n.Nbody.Slice() {
		cnt += countNodes(n1)
	}
	for _, n1 := range n.List.Slice() {
		cnt += countNodes(n1)
	}
	for _, n1 := range n.Rlist.Slice() {
		cnt += countNodes(n1)
	}
	return cnt
}

// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func inlcalls(fn *Node) {
	savefn := Curfn
	Curfn = fn
	maxCost := int32(inlineMaxBudget)
	if countNodes(fn) >= inlineBigFunctionNodes {
		maxCost = inlineBigFunctionMaxCost
	}
	fn = inlnode(fn, maxCost)
	if fn != Curfn {
		Fatalf("inlnode replaced curfn")
	}
	Curfn = savefn
}
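
// In other words, once countNodes(fn) reaches inlineBigFunctionNodes (5000),
// only callees whose cost is at most inlineBigFunctionMaxCost (20) are
// inlined into fn, which keeps already-large functions from growing much
// further.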

// Turn an OINLCALL into a statement.
func inlconv2stmt(n *Node) {
	n.Op = OBLOCK

	// n->ninit stays
	n.List.Set(n.Nbody.Slice())

	n.Nbody.Set(nil)
	n.Rlist.Set(nil)
}

// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
//	n.Left = inlconv2expr(n.Left)
func inlconv2expr(n *Node) *Node {
	r := n.Rlist.First()
	return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
}
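
// Roughly, at the source level (hypothetical example, not literal compiler
// output): once mkinlcall has turned a call to an inlineable f into an
// OINLCALL, a statement such as
//
//	f(x)
//
// becomes a block holding f's inlined body (inlconv2stmt), while an
// expression use such as
//
//	y := f(x)
//
// keeps the single result as the expression and carries the inlined body in
// its init list (inlconv2expr).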

// Turn the rlist (with the return values) of the OINLCALL in
// n into an expression list lumping the ninit and body
// containing the inlined statements on the first list element so
// that the order will be preserved. Used in return, oas2func, and call
// statements.
func inlconv2list(n *Node) []*Node {
	if n.Op != OINLCALL || n.Rlist.Len() == 0 {
		Fatalf("inlconv2list %+v\n", n)
	}

	s := n.Rlist.Slice()
	s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
	return s
}
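
// inlnodelist runs inlnode on every element of l, storing the (possibly
// replaced) results back into the list.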
func inlnodelist(l Nodes, maxCost int32) {
	s := l.Slice()
	for i := range s {
		s[i] = inlnode(s[i], maxCost)
	}
}
|
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
// inlnode recurses over the tree to find inlineable calls, which will
|
2016-03-01 23:21:55 +00:00
|
|
|
// be turned into OINLCALLs by mkinlcall. When the recursion comes
|
2015-02-13 14:40:36 -05:00
|
|
|
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
|
|
|
|
// nbody and nelse and use one of the 4 inlconv/glue functions above
|
|
|
|
// to turn the OINLCALL into an expression, a statement, or patch it
|
|
|
|
// in to this nodes list or rlist as appropriate.
|
|
|
|
// NOTE it makes no sense to pass the glue functions down the
|
|
|
|
// recursion to the level where the OINLCALL gets created because they
|
|
|
|
// have to edit /this/ n, so you'd have to push that one down as well,
|
|
|
|
// but then you may as well do it here. so this is cleaner and
|
|
|
|
// shorter and less complicated.
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
// The result of inlnode MUST be assigned back to n, e.g.
|
|
|
|
// n.Left = inlnode(n.Left)
|
2018-07-23 13:09:48 -07:00
|
|
|
func inlnode(n *Node, maxCost int32) *Node {
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
if n == nil {
|
|
|
|
return n
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
switch n.Op {
|
|
|
|
// inhibit inlining of their argument
|
2015-04-01 09:38:44 -07:00
|
|
|
case ODEFER, OPROC:
|
2015-02-13 14:40:36 -05:00
|
|
|
switch n.Left.Op {
|
2015-04-01 09:38:44 -07:00
|
|
|
case OCALLFUNC, OCALLMETH:
|
2017-02-27 19:56:38 +02:00
|
|
|
n.Left.SetNoInline(true)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2017-02-27 10:45:26 -08:00
|
|
|
return n
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-09-24 21:38:58 +02:00
|
|
|
// TODO do them here (or earlier),
|
2015-02-13 14:40:36 -05:00
|
|
|
// so escape analysis can avoid more heapmoves.
|
|
|
|
case OCLOSURE:
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
return n
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2016-03-02 12:49:37 -08:00
|
|
|
lno := setlineno(n)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2018-07-23 13:09:48 -07:00
|
|
|
inlnodelist(n.Ninit, maxCost)
|
2016-03-08 15:10:26 -08:00
|
|
|
for _, n1 := range n.Ninit.Slice() {
|
|
|
|
if n1.Op == OINLCALL {
|
|
|
|
inlconv2stmt(n1)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-23 13:09:48 -07:00
|
|
|
n.Left = inlnode(n.Left, maxCost)
|
2015-02-13 14:40:36 -05:00
|
|
|
if n.Left != nil && n.Left.Op == OINLCALL {
|
		n.Left = inlconv2expr(n.Left)
	}

	n.Right = inlnode(n.Right, maxCost)
	if n.Right != nil && n.Right.Op == OINLCALL {
		if n.Op == OFOR || n.Op == OFORUNTIL {
			inlconv2stmt(n.Right)
		} else {
			n.Right = inlconv2expr(n.Right)
		}
	}

	inlnodelist(n.List, maxCost)
	switch n.Op {
	case OBLOCK:
		for _, n2 := range n.List.Slice() {
			if n2.Op == OINLCALL {
				inlconv2stmt(n2)
			}
		}

	case ORETURN, OCALLFUNC, OCALLMETH, OCALLINTER, OAPPEND, OCOMPLEX:
		// if we just replaced arg in f(arg()) or return arg with an inlined call
		// and arg returns multiple values, glue as list
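		// For example (hypothetical caller code, for illustration only):
		//	return g()	// g returns two values
		// If the call to g was inlined, n.List holds a single OINLCALL whose
		// Rlist carries both result temporaries; expanding the list lets the
		// ORETURN see two values again.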
		if n.List.Len() == 1 && n.List.First().Op == OINLCALL && n.List.First().Rlist.Len() > 1 {
			n.List.Set(inlconv2list(n.List.First()))
			break
		}

		fallthrough

	default:
		s := n.List.Slice()
		for i1, n1 := range s {
			if n1 != nil && n1.Op == OINLCALL {
				s[i1] = inlconv2expr(s[i1])
			}
		}
	}

	inlnodelist(n.Rlist, maxCost)
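	// For example (hypothetical caller code, for illustration only):
	//	x, y := g()
	// Once the call to g has been rewritten into an OINLCALL, the OAS2FUNC
	// below becomes a plain OAS2 whose right-hand side is the list of result
	// temporaries, and the node is re-typechecked.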
	if n.Op == OAS2FUNC && n.Rlist.First().Op == OINLCALL {
		n.Rlist.Set(inlconv2list(n.Rlist.First()))
		n.Op = OAS2
		n.SetTypecheck(0)
		n = typecheck(n, Etop)
	} else {
		s := n.Rlist.Slice()
		for i1, n1 := range s {
			if n1.Op == OINLCALL {
				if n.Op == OIF {
					inlconv2stmt(n1)
				} else {
					s[i1] = inlconv2expr(s[i1])
				}
			}
		}
	}

	inlnodelist(n.Nbody, maxCost)
	for _, n := range n.Nbody.Slice() {
		if n.Op == OINLCALL {
			inlconv2stmt(n)
		}
	}

	// with all the branches out of the way, it is now time to
	// transmogrify this node itself unless inhibited by the
	// switch at the top of this function.
	switch n.Op {
	case OCALLFUNC, OCALLMETH:
		if n.NoInline() {
			return n
		}
	}

	switch n.Op {
	case OCALLFUNC:
		if Debug['m'] > 3 {
			fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
		}
		if n.Left.Func != nil && n.Left.Func.Inl != nil && !isIntrinsicCall(n) { // normal case
			n = mkinlcall(n, n.Left, maxCost)
		} else if n.Left.isMethodExpression() && asNode(n.Left.Sym.Def) != nil {
			n = mkinlcall(n, asNode(n.Left.Sym.Def), maxCost)
		} else if n.Left.Op == OCLOSURE {
			if f := inlinableClosure(n.Left); f != nil {
				n = mkinlcall(n, f, maxCost)
			}
		} else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil {
			if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE {
				if f := inlinableClosure(d.Right); f != nil {
					// NB: this check is necessary to prevent indirect re-assignment of the variable.
					// Having the address taken after the invocation, or having it used only for reads,
					// is actually fine, but we have no easy way to distinguish the safe cases.
					if d.Left.Addrtaken() {
						if Debug['m'] > 1 {
							fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
						}
						break
					}

					// ensure the variable is never re-assigned
					if unsafe, a := reassigned(n.Left); unsafe {
						if Debug['m'] > 1 {
							if a != nil {
								fmt.Printf("%v: cannot inline re-assigned closure variable at %v: %v\n", n.Line(), a.Line(), a)
							} else {
								fmt.Printf("%v: cannot inline global closure variable %v\n", n.Line(), n.Left)
							}
						}
						break
					}
					n = mkinlcall(n, f, maxCost)
				}
			}
		}

	case OCALLMETH:
		if Debug['m'] > 3 {
			fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
		}

		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
		if n.Left.Type == nil {
			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
		}

		if n.Left.Type.Nname() == nil {
			Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
		}

		n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost)
	}

	lineno = lno
	return n
}

// inlinableClosure takes an OCLOSURE node and follows linkage to the matching ONAME with
// the inlinable body. Returns nil if the function is not inlinable.
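// Both closure call forms reach this helper from inlnode's OCALLFUNC case
// above: a directly called function literal (n.Left.Op == OCLOSURE) and a
// call through a local variable bound once to a function literal
// (hypothetical illustration: f := func() int { return 1 }; f()).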
func inlinableClosure(n *Node) *Node {
	c := n.Func.Closure
	caninl(c)
	f := c.Func.Nname
	if f == nil || f.Func.Inl == nil {
		return nil
	}
	return f
}

// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
// indicating whether the name has any assignments other than its declaration.
// The second return value is the first such assignment encountered in the walk, if any. It is mostly
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
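// For example (hypothetical code, for illustration only):
//	f := func() int { return 1 }
//	f = otherFunc
//	f()
// The second assignment makes f unsafe to inline through, so reassigned
// reports true and returns that OAS node.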
func reassigned(n *Node) (bool, *Node) {
	if n.Op != ONAME {
		Fatalf("reassigned %v", n)
	}
	// There is no way to reliably check for no-reassignment of globals, so assume they can be reassigned.
	if n.Name.Curfn == nil {
		return true, nil
	}
	f := n.Name.Curfn
	// There just might be a good reason for this although this can be pretty surprising:
	// local variables inside a closure have Curfn pointing to the OCLOSURE node instead
	// of the corresponding ODCLFUNC.
	// We need to walk the function body to check for reassignments, so we follow the
	// linkage to the ODCLFUNC node as that is where the body is held.
	if f.Op == OCLOSURE {
		f = f.Func.Closure
	}
	v := reassignVisitor{name: n}
	a := v.visitList(f.Nbody)
	return a != nil, a
}
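
// reassignVisitor walks a statement tree looking for an assignment to name
// other than its defining statement, and reports the first one it finds.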
type reassignVisitor struct {
	name *Node
}

func (v *reassignVisitor) visit(n *Node) *Node {
	if n == nil {
		return nil
	}
	switch n.Op {
	case OAS:
		if n.Left == v.name && n != v.name.Name.Defn {
			return n
		}
		return nil
	case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE:
		for _, p := range n.List.Slice() {
			if p == v.name && n != v.name.Name.Defn {
				return n
			}
		}
		return nil
	}
	if a := v.visit(n.Left); a != nil {
		return a
	}
	if a := v.visit(n.Right); a != nil {
		return a
	}
	if a := v.visitList(n.List); a != nil {
		return a
	}
	if a := v.visitList(n.Rlist); a != nil {
		return a
	}
	if a := v.visitList(n.Ninit); a != nil {
		return a
	}
	if a := v.visitList(n.Nbody); a != nil {
		return a
	}
	return nil
}

func (v *reassignVisitor) visitList(l Nodes) *Node {
	for _, n := range l.Slice() {
		if a := v.visit(n); a != nil {
			return a
		}
	}
	return nil
}

// The result of mkinlcall MUST be assigned back to n, e.g.
//	n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *Node, fn *Node, maxCost int32) *Node {
	save_safemode := safemode

	// imported functions may refer to unsafe as long as the
	// package was marked safe during import (already checked).
	pkg := fnpkg(fn)

	if pkg != localpkg && pkg != nil {
		safemode = false
	}
	n = mkinlcall1(n, fn, maxCost)
	safemode = save_safemode
	return n
}
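
// tinlvar returns the temporary that stands in for the formal parameter t in
// the inlined body; unnamed or blank parameters map to the blank node.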
func tinlvar(t *types.Field, inlvars map[*Node]*Node) *Node {
	if n := asNode(t.Nname); n != nil && !n.isBlank() {
		inlvar := inlvars[n]
		if inlvar == nil {
			Fatalf("missing inlvar for %v\n", n)
		}
		return inlvar
	}

	return typecheck(nblank, Erv|Easgn)
}
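
// inlgen is a counter of inlined calls; it is used to keep names introduced
// by separate inlined bodies distinct.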
var inlgen int

// If n is a call, and fn is a function with an inlinable body,
// return an OINLCALL.
// On return, ninit has the parameter assignments, nbody is the inlined
// function body, and list and rlist contain the input and output
// parameters, respectively.
// The result of mkinlcall1 MUST be assigned back to n, e.g.
//	n.Left = mkinlcall1(n.Left, fn, isddd)
func mkinlcall1(n, fn *Node, maxCost int32) *Node {
	if fn.Func.Inl == nil {
		// No inlinable body.
		return n
	}
	if fn.Func.Inl.Cost > maxCost {
		// The inlined function body is too big. Typically we use this check to restrict
		// inlining into very big functions. See issue 26546 and 17566.
		return n
	}

	if fn == Curfn || fn.Name.Defn == Curfn {
		// Can't recursively inline a function into itself.
		return n
	}

	if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
		// Runtime package must not be instrumented.
		// Instrument skips runtime package. However, some runtime code can be
		// inlined into other packages and instrumented there. To avoid this,
		// we disable inlining of runtime functions when instrumenting.
		// The example that we observed is inlining of LockOSThread,
		// which led to false race reports on m contents.
		return n
	}

	if Debug_typecheckinl == 0 {
		typecheckinl(fn)
	}

	// We have a function node, and it has an inlineable body.
	if Debug['m'] > 1 {
fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
|
2015-02-13 14:40:36 -05:00
|
|
|
} else if Debug['m'] != 0 {
|
2015-04-17 12:03:22 -04:00
|
|
|
fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
if Debug['m'] > 2 {
|
2016-08-31 15:22:36 -07:00
|
|
|
fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2018-07-26 12:51:06 +03:00
|
|
|
if ssaDump != "" && ssaDump == Curfn.funcname() {
|
|
|
|
ssaDumpInlined = append(ssaDumpInlined, fn)
|
|
|
|
}
|
|
|
|
|
2015-02-23 16:07:24 -05:00
|
|
|
ninit := n.Ninit
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-10-21 15:58:37 -07:00
|
|
|
// Make temp names to use instead of the originals.
|
|
|
|
inlvars := make(map[*Node]*Node)
|
|
|
|
|
2017-10-06 11:32:28 -04:00
|
|
|
// record formals/locals for later post-processing
|
|
|
|
var inlfvars []*Node
|
|
|
|
|
2018-04-04 15:53:27 -07:00
|
|
|
// Handle captured variables when inlining closures.
|
2016-02-26 13:48:24 -08:00
|
|
|
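	// For example (hypothetical closure, for illustration only):
	//	x := 0
	//	f := func() int { return x } // x is captured from the enclosing function
	//	f()
	// A by-value capture introduces a fresh temporary initialized from the outer
	// x; a by-reference capture introduces a "&x" temporary and rewrites uses of
	// x inside the inlined body to dereferences of that address.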
	if fn.Name.Defn != nil {
		if c := fn.Name.Defn.Func.Closure; c != nil {
			for _, v := range c.Func.Closure.Func.Cvars.Slice() {
				if v.Op == OXXX {
					continue
				}

				o := v.Name.Param.Outer
				// make sure the outer param matches the inlining location
				// NB: if we enabled inlining of functions containing OCLOSURE or refined
				// the reassigned check via some sort of copy propagation this would most
				// likely need to be changed to a loop to walk up to the correct Param
				if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
					Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
				}

				if v.Name.Byval() {
					iv := typecheck(inlvar(v), Erv)
					ninit.Append(nod(ODCL, iv, nil))
					ninit.Append(typecheck(nod(OAS, iv, o), Etop))
					inlvars[v] = iv
				} else {
					addr := newname(lookup("&" + v.Sym.Name))
					addr.Type = types.NewPtr(v.Type)
					ia := typecheck(inlvar(addr), Erv)
					ninit.Append(nod(ODCL, ia, nil))
					ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), Etop))
					inlvars[addr] = ia

					// When capturing by reference, all occurrences of the captured var
					// must be substituted with a dereference of the temporary address.
					inlvars[v] = typecheck(nod(OIND, ia, nil), Erv)
				}
			}
		}
	}

	for _, ln := range fn.Func.Inl.Dcl {
		if ln.Op != ONAME {
			continue
		}
		if ln.Class() == PPARAMOUT { // return values handled below.
			continue
		}
		if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
			continue
		}
		inlvars[ln] = typecheck(inlvar(ln), Erv)
		if ln.Class() == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class() == PPARAM {
			ninit.Append(nod(ODCL, inlvars[ln], nil))
		}
		if genDwarfInline > 0 {
			inlf := inlvars[ln]
			if ln.Class() == PPARAM {
				inlf.SetInlFormal(true)
			} else {
				inlf.SetInlLocal(true)
			}
			inlf.Pos = ln.Pos
			inlfvars = append(inlfvars, inlf)
		}
	}

	// temporaries for return values.
	var retvars []*Node
	for i, t := range fn.Type.Results().Fields().Slice() {
		var m *Node
cmd/compile: replace Field.Nname.Pos with Field.Pos
For struct fields and methods, Field.Nname was only used to store
position information, which means we're allocating an entire ONAME
Node+Name+Param structure just for one field. We can optimize away
these ONAME allocations by instead adding a Field.Pos field.
Unfortunately, we can't get rid of Field.Nname, because it's needed
for function parameters, so Field grows a little bit and now has more
redundant information in those cases. However, that was already the
case (e.g., Field.Sym and Field.Nname.Sym), and it's still a net win
for allocations as demonstrated by the benchmarks below.
Additionally, by moving the ONAME allocation for function parameters
to funcargs, we can avoid allocating them for function parameters that
aren't used in corresponding function bodies (e.g., interface methods,
function-typed variables, and imported functions/methods without
inline bodies).
name old time/op new time/op delta
Template 254ms ± 6% 251ms ± 6% -1.04% (p=0.000 n=487+488)
Unicode 128ms ± 7% 128ms ± 7% ~ (p=0.294 n=482+467)
GoTypes 862ms ± 5% 860ms ± 4% ~ (p=0.075 n=488+471)
Compiler 3.91s ± 4% 3.90s ± 4% -0.39% (p=0.000 n=468+473)
name old user-time/op new user-time/op delta
Template 339ms ±14% 336ms ±14% -1.02% (p=0.001 n=498+494)
Unicode 176ms ±18% 176ms ±25% ~ (p=0.940 n=491+499)
GoTypes 1.13s ± 8% 1.13s ± 9% ~ (p=0.157 n=496+493)
Compiler 5.24s ± 6% 5.21s ± 6% -0.57% (p=0.000 n=485+489)
name old alloc/op new alloc/op delta
Template 38.3MB ± 0% 37.3MB ± 0% -2.58% (p=0.000 n=499+497)
Unicode 29.1MB ± 0% 29.1MB ± 0% -0.03% (p=0.000 n=500+493)
GoTypes 116MB ± 0% 115MB ± 0% -0.65% (p=0.000 n=498+499)
Compiler 492MB ± 0% 487MB ± 0% -1.00% (p=0.000 n=497+498)
name old allocs/op new allocs/op delta
Template 364k ± 0% 360k ± 0% -1.15% (p=0.000 n=499+499)
Unicode 336k ± 0% 336k ± 0% -0.01% (p=0.000 n=500+493)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.30% (p=0.000 n=499+499)
Compiler 4.54M ± 0% 4.51M ± 0% -0.58% (p=0.000 n=494+495)
Passes toolstash-check -gcflags=-dwarf=false. Changes DWARF output
because position information is now tracked more precisely for
function parameters.
Change-Id: Ib8077d70d564cc448c5e4290baceab3a4396d712
Reviewed-on: https://go-review.googlesource.com/108217
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
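As a rough, assumed sketch of the data-structure change this message describes (the types below are simplified stand-ins, not the actual compiler definitions):

package example

type Sym struct{ Name string }
type Type struct{}
type Node struct{ Pos int }
type XPos = int // stand-in for cmd/internal/src.XPos

// Before this CL, reading a struct field's position meant allocating a whole
// ONAME Node just to hold Field.Nname.Pos. Storing the position directly on
// the Field avoids that allocation; Nname is still present but is now only
// populated where it is really needed (function parameters).
type Field struct {
	Sym   *Sym
	Type  *Type
	Pos   XPos
	Nname *Node
}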
2018-04-18 22:57:10 -07:00
|
|
|
mpos := t.Pos
|
|
|
|
if n := asNode(t.Nname); n != nil && !n.isBlank() {
|
|
|
|
m = inlvar(n)
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
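The API shape change this message describes can be illustrated with a hypothetical rewriter (the function names and bodies below are invented; only the before/after signature pattern mirrors the CL):

package example

type Node struct{ Op int }

// Old style: taking **Node typically forces the *Node itself to escape.
func rewriteOld(np **Node) {
	n := *np
	// ... transform n, possibly replacing it ...
	*np = n
}

// New style: like append, the function may modify its argument and returns
// the node the caller should use in its place.
func rewriteNew(n *Node) *Node {
	// ... transform n, possibly replacing it ...
	return n
}

func use(n *Node) *Node {
	rewriteOld(&n)    // before
	n = rewriteNew(n) // after
	return n
}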
2016-03-20 08:03:31 -07:00
|
|
|
m = typecheck(m, Erv)
|
2018-04-18 22:57:10 -07:00
|
|
|
inlvars[n] = m
|
2015-02-13 14:40:36 -05:00
|
|
|
} else {
|
|
|
|
// anonymous return values, synthesize names for use in assignment that replaces return
|
|
|
|
m = retvar(t, i)
|
|
|
|
}
|
|
|
|
|
2017-10-06 11:32:28 -04:00
|
|
|
if genDwarfInline > 0 {
|
|
|
|
// Don't update the src.Pos on a return variable if it
|
2017-12-11 15:53:31 -05:00
|
|
|
// was manufactured by the inliner (e.g. "~R2"); such vars
|
2017-10-06 11:32:28 -04:00
|
|
|
// were not part of the original callee.
|
2017-12-11 15:53:31 -05:00
|
|
|
if !strings.HasPrefix(m.Sym.Name, "~R") {
|
2017-10-06 11:32:28 -04:00
|
|
|
m.SetInlFormal(true)
|
|
|
|
m.Pos = mpos
|
|
|
|
inlfvars = append(inlfvars, m)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
ninit.Append(nod(ODCL, m, nil))
|
2016-03-10 11:49:20 -08:00
|
|
|
retvars = append(retvars, m)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
// Assign arguments to the parameters' temp names.
|
|
|
|
as := nod(OAS2, nil, nil)
|
|
|
|
as.Rlist.Set(n.List.Slice())
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
// For non-dotted calls to variadic functions, we assign the
|
|
|
|
// variadic parameter's temp name separately.
|
|
|
|
var vas *Node
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
if fn.IsMethod() {
|
|
|
|
rcv := fn.Type.Recv()
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
if n.Left.Op == ODOTMETH {
|
|
|
|
// For x.M(...), assign x directly to the
|
|
|
|
// receiver parameter.
|
|
|
|
if n.Left.Left == nil {
|
|
|
|
Fatalf("method call without receiver: %+v", n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2017-04-07 13:47:10 -07:00
|
|
|
ras := nod(OAS, tinlvar(rcv, inlvars), n.Left.Left)
|
|
|
|
ras = typecheck(ras, Etop)
|
|
|
|
ninit.Append(ras)
|
|
|
|
} else {
|
|
|
|
// For T.M(...), add the receiver parameter to
|
|
|
|
// as.List, so it's assigned by the normal
|
|
|
|
// arguments.
|
|
|
|
if as.Rlist.Len() == 0 {
|
|
|
|
Fatalf("non-method call to method without first arg: %+v", n)
|
|
|
|
}
|
|
|
|
as.List.Append(tinlvar(rcv, inlvars))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
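The two receiver cases handled above can be seen in a small illustrative example (types and calls invented for the sketch):

package example

type T struct{ n int }

func (t T) M(d int) int { return t.n + d }

func calls(x T) (int, int) {
	// x.M(1) is the ODOTMETH case: the inliner assigns x directly to the
	// receiver's temp and only 1 goes through the ordinary argument list.
	a := x.M(1)

	// T.M(x, 2) is the method-expression case: the receiver is simply the
	// first ordinary argument, so its temp is appended to as.List.
	b := T.M(x, 2)

	return a, b
}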
2017-04-07 13:47:10 -07:00
|
|
|
for _, param := range fn.Type.Params().Fields().Slice() {
|
|
|
|
// For ordinary parameters or variadic parameters in
|
|
|
|
// dotted calls, just add the variable to the
|
|
|
|
// assignment list, and we're done.
|
2018-03-13 12:58:56 -07:00
|
|
|
if !param.Isddd() || n.Isddd() {
|
2017-04-07 13:47:10 -07:00
|
|
|
as.List.Append(tinlvar(param, inlvars))
|
|
|
|
continue
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
// Otherwise, we need to collect the remaining values
|
|
|
|
// to pass as a slice.
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
numvals := n.List.Len()
|
|
|
|
if numvals == 1 && n.List.First().Type.IsFuncArgStruct() {
|
|
|
|
numvals = n.List.First().Type.NumFields()
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
x := as.List.Len()
|
|
|
|
for as.List.Len() < numvals {
|
|
|
|
as.List.Append(argvar(param.Type, as.List.Len()))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2017-04-07 13:47:10 -07:00
|
|
|
varargs := as.List.Slice()[x:]
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
vas = nod(OAS, tinlvar(param, inlvars), nil)
|
|
|
|
if len(varargs) == 0 {
|
|
|
|
vas.Right = nodnil()
|
|
|
|
vas.Right.Type = param.Type
|
|
|
|
} else {
|
|
|
|
vas.Right = nod(OCOMPLIT, nil, typenod(param.Type))
|
|
|
|
vas.Right.List.Set(varargs)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
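For a non-dotted call to a variadic function, the loop above gathers the trailing arguments into a composite literal (or a typed nil when there are none). A small illustrative sketch of the source-level effect, with invented names:

package example

func sum(xs ...int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

func caller(s []int) (int, int, int) {
	a := sum(1, 2, 3) // non-dotted: xs is bound to []int{1, 2, 3}
	b := sum()        // no trailing arguments: xs is bound to a nil []int
	c := sum(s...)    // dotted call: s is assigned to xs like any other argument
	return a, b, c
}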
2016-03-08 15:10:26 -08:00
|
|
|
if as.Rlist.Len() != 0 {
|
2016-03-20 08:03:31 -07:00
|
|
|
as = typecheck(as, Etop)
|
2016-03-08 15:10:26 -08:00
|
|
|
ninit.Append(as)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
if vas != nil {
|
|
|
|
vas = typecheck(vas, Etop)
|
|
|
|
ninit.Append(vas)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-04-07 13:47:10 -07:00
|
|
|
// Zero the return parameters.
|
2016-03-10 11:49:20 -08:00
|
|
|
for _, n := range retvars {
|
2017-05-27 17:43:37 -04:00
|
|
|
ras := nod(OAS, n, nil)
|
|
|
|
ras = typecheck(ras, Etop)
|
|
|
|
ninit.Append(ras)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2016-08-15 21:09:39 -07:00
|
|
|
retlabel := autolabel(".i")
|
2016-06-01 10:15:02 -07:00
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
inlgen++
|
|
|
|
|
2017-09-18 23:02:02 -07:00
|
|
|
parent := -1
|
|
|
|
if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
|
|
|
|
parent = b.InliningIndex()
|
|
|
|
}
|
|
|
|
newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
|
|
|
|
|
2017-10-06 11:32:28 -04:00
|
|
|
if genDwarfInline > 0 {
|
|
|
|
if !fn.Sym.Linksym().WasInlined() {
|
|
|
|
Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
|
|
|
|
fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-10 11:49:20 -08:00
|
|
|
subst := inlsubst{
|
2017-09-18 23:02:02 -07:00
|
|
|
retlabel: retlabel,
|
|
|
|
retvars: retvars,
|
|
|
|
inlvars: inlvars,
|
|
|
|
bases: make(map[*src.PosBase]*src.PosBase),
|
|
|
|
newInlIndex: newIndex,
|
2016-03-10 11:49:20 -08:00
|
|
|
}
|
|
|
|
|
2018-04-04 15:53:27 -07:00
|
|
|
body := subst.list(asNodes(fn.Func.Inl.Body))
|
2016-03-10 11:49:20 -08:00
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
lab := nod(OLABEL, retlabel, nil)
|
2016-05-27 15:33:11 -07:00
|
|
|
body = append(body, lab)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-02-27 14:31:33 -08:00
|
|
|
typecheckslice(body, Etop)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-10-06 11:32:28 -04:00
|
|
|
if genDwarfInline > 0 {
|
|
|
|
for _, v := range inlfvars {
|
|
|
|
v.Pos = subst.updatedPos(v.Pos)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
//dumplist("ninit post", ninit);
|
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
call := nod(OINLCALL, nil, nil)
|
2016-03-08 15:10:26 -08:00
|
|
|
call.Ninit.Set(ninit.Slice())
|
2016-02-27 14:31:33 -08:00
|
|
|
call.Nbody.Set(body)
|
2016-03-10 11:49:20 -08:00
|
|
|
call.Rlist.Set(retvars)
|
2015-02-13 14:40:36 -05:00
|
|
|
call.Type = n.Type
|
2017-04-25 18:02:43 -07:00
|
|
|
call.SetTypecheck(1)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
// transitive inlining
|
2015-02-24 12:19:01 -05:00
|
|
|
// might be nice to do this before exporting the body,
|
|
|
|
// but can't emit the body with inlining expanded.
|
|
|
|
// instead we emit the things that the body needs
|
|
|
|
// and each use must redo the inlining.
|
|
|
|
// luckily these are small.
|
2018-07-23 13:09:48 -07:00
|
|
|
inlnodelist(call.Nbody, maxCost)
|
2016-02-27 14:31:33 -08:00
|
|
|
for _, n := range call.Nbody.Slice() {
|
|
|
|
if n.Op == OINLCALL {
|
|
|
|
inlconv2stmt(n)
|
2015-02-24 12:19:01 -05:00
|
|
|
}
|
|
|
|
}
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
if Debug['m'] > 2 {
|
2017-10-09 11:06:52 -07:00
|
|
|
fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-03-20 08:03:31 -07:00
|
|
|
|
2017-10-09 11:06:52 -07:00
|
|
|
return call
|
2015-02-13 14:40:36 -05:00
|
|
|
}
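Putting the pieces of the function above together, its effect on a toy call can be sketched in ordinary Go (the temp and label names below stand in for the compiler-generated ~R0 and ".i" names; everything here is illustrative, not actual compiler output):

package example

func add(x, y int) int { return x + y }

// inlinedAdd shows roughly what replaces r := add(a, b): argument
// assignments and zeroed result temps in Ninit, the substituted body with
// return rewritten to assignment plus goto, the return label, and the
// result flowing out through the OINLCALL's Rlist.
func inlinedAdd(a, b int) (r int) {
	var x, y int // inlvar temps for the parameters
	var r0 int   // stands in for the result temp ~R0
	x, y = a, b  // arguments assigned to the parameters' temp names
	r0 = 0       // return parameters zeroed
	{
		r0 = x + y // substituted body: return became an assignment...
		goto ret   // ...followed by a goto to the return label
	}
ret: // stands in for the autolabel ".i"
	r = r0
	return
}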
|
|
|
|
|
|
|
|
// Every time we expand a function we generate a new set of tmpnames,
|
|
|
|
// PAUTO's in the calling function, and link them off of the
|
|
|
|
// PPARAM's, PAUTOs, and PPARAMOUTs of the called function.
|
|
|
|
func inlvar(var_ *Node) *Node {
|
|
|
|
if Debug['m'] > 3 {
|
2016-08-31 15:22:36 -07:00
|
|
|
fmt.Printf("inlvar %+v\n", var_)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2015-02-23 16:07:24 -05:00
|
|
|
n := newname(var_.Sym)
|
2015-02-13 14:40:36 -05:00
|
|
|
n.Type = var_.Type
|
2017-04-25 18:14:12 -07:00
|
|
|
n.SetClass(PAUTO)
|
2017-04-27 15:17:57 -07:00
|
|
|
n.Name.SetUsed(true)
|
2015-05-27 07:31:56 -04:00
|
|
|
n.Name.Curfn = Curfn // the calling function, not the called one
|
2017-02-27 19:56:38 +02:00
|
|
|
n.SetAddrtaken(var_.Addrtaken())
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-02-25 10:35:19 -08:00
|
|
|
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
|
2015-02-13 14:40:36 -05:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
// Synthesize a variable to store the inlined function's results in.
|
2017-04-04 17:54:02 -07:00
|
|
|
func retvar(t *types.Field, i int) *Node {
|
2017-12-11 15:53:31 -05:00
|
|
|
n := newname(lookupN("~R", i))
|
2015-02-13 14:40:36 -05:00
|
|
|
n.Type = t.Type
|
2017-04-25 18:14:12 -07:00
|
|
|
n.SetClass(PAUTO)
|
2017-04-27 15:17:57 -07:00
|
|
|
n.Name.SetUsed(true)
|
2015-05-27 07:31:56 -04:00
|
|
|
n.Name.Curfn = Curfn // the calling function, not the called one
|
2016-02-25 10:35:19 -08:00
|
|
|
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
|
2015-02-13 14:40:36 -05:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
// Synthesize a variable to store the inlined function's arguments
|
|
|
|
// when they come from a multiple return call.
|
2017-04-04 17:54:02 -07:00
|
|
|
func argvar(t *types.Type, i int) *Node {
|
2016-09-15 15:45:10 +10:00
|
|
|
n := newname(lookupN("~arg", i))
|
2016-03-30 10:57:47 -07:00
|
|
|
n.Type = t.Elem()
|
2017-04-25 18:14:12 -07:00
|
|
|
n.SetClass(PAUTO)
|
2017-04-27 15:17:57 -07:00
|
|
|
n.Name.SetUsed(true)
|
2015-05-27 07:31:56 -04:00
|
|
|
n.Name.Curfn = Curfn // the calling function, not the called one
|
2016-02-25 10:35:19 -08:00
|
|
|
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
|
2015-02-13 14:40:36 -05:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
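The three helpers above follow simple naming conventions: inlvar temps reuse the callee's own parameter and local names, retvar temps for anonymous results are named ~R0, ~R1, and so on, and argvar temps (~arg0, ~arg1, ...) receive the values of a multi-value call feeding a variadic parameter. A small illustrative sketch (invented example, not compiler output):

package example

func div(p, q int) (int, int) { return p / q, p % q }

func caller(a, b int) (int, int) {
	// Inlining this call introduces PAUTO temps named p and q (inlvar) in
	// caller for the parameters, and ~R0 and ~R1 (retvar) for the two
	// anonymous results, which the OINLCALL's Rlist hands back to x and y.
	x, y := div(a, b)
	return x, y
}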
2016-03-10 11:49:20 -08:00
|
|
|
// The inlsubst type implements the actual inlining of a single
|
|
|
|
// function call.
|
|
|
|
type inlsubst struct {
|
|
|
|
// Target of the goto substituted in place of a return.
|
|
|
|
retlabel *Node
|
|
|
|
|
|
|
|
// Temporary result variables.
|
|
|
|
retvars []*Node
|
2016-10-26 22:58:50 -07:00
|
|
|
|
|
|
|
inlvars map[*Node]*Node
|
2017-09-18 23:02:02 -07:00
|
|
|
|
|
|
|
// bases maps from original PosBase to PosBase with an extra
|
|
|
|
// inlined call frame.
|
|
|
|
bases map[*src.PosBase]*src.PosBase
|
|
|
|
|
|
|
|
// newInlIndex is the index of the inlined call frame to
|
|
|
|
// insert for inlined nodes.
|
|
|
|
newInlIndex int
|
2016-03-10 11:49:20 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// list inlines a list of nodes.
|
|
|
|
func (subst *inlsubst) list(ll Nodes) []*Node {
|
2016-03-08 15:10:26 -08:00
|
|
|
s := make([]*Node, 0, ll.Len())
|
|
|
|
for _, n := range ll.Slice() {
|
2016-03-10 11:49:20 -08:00
|
|
|
s = append(s, subst.node(n))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-03-04 09:37:58 -08:00
|
|
|
return s
|
2016-02-27 14:31:33 -08:00
|
|
|
}
|
|
|
|
|
2016-03-10 11:49:20 -08:00
|
|
|
// node recursively copies a node from the saved pristine body of the
|
|
|
|
// inlined function, substituting references to input/output
|
|
|
|
// parameters with ones to the tmpnames, and substituting returns with
|
|
|
|
// assignments to the output.
|
|
|
|
func (subst *inlsubst) node(n *Node) *Node {
|
2015-02-13 14:40:36 -05:00
|
|
|
if n == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch n.Op {
|
|
|
|
case ONAME:
|
2016-10-26 22:58:50 -07:00
|
|
|
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
|
2015-02-13 14:40:36 -05:00
|
|
|
if Debug['m'] > 2 {
|
2016-10-26 22:58:50 -07:00
|
|
|
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-10-26 22:58:50 -07:00
|
|
|
return inlvar
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
if Debug['m'] > 2 {
|
2016-08-31 15:22:36 -07:00
|
|
|
fmt.Printf("not substituting name %+v\n", n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
return n
|
|
|
|
|
2015-04-01 09:38:44 -07:00
|
|
|
case OLITERAL, OTYPE:
|
2017-02-17 16:07:47 -05:00
|
|
|
// If n is a named constant or type, we can continue
|
|
|
|
// using it in the inline copy. Otherwise, make a copy
|
|
|
|
// so we can update the line number.
|
|
|
|
if n.Sym != nil {
|
|
|
|
return n
|
|
|
|
}
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
|
|
|
|
|
|
|
|
// dump("Return before substitution", n);
|
|
|
|
case ORETURN:
|
2016-09-16 11:00:54 +10:00
|
|
|
m := nod(OGOTO, subst.retlabel, nil)
|
2016-03-10 11:49:20 -08:00
|
|
|
m.Ninit.Set(subst.list(n.Ninit))
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-03-10 11:49:20 -08:00
|
|
|
if len(subst.retvars) != 0 && n.List.Len() != 0 {
|
2016-09-16 11:00:54 +10:00
|
|
|
as := nod(OAS2, nil, nil)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-03-10 11:49:20 -08:00
|
|
|
// Make a shallow copy of retvars.
|
|
|
|
// Otherwise OINLCALL.Rlist will be the same list,
|
|
|
|
// and later walk and typecheck may clobber it.
|
|
|
|
for _, n := range subst.retvars {
|
2016-03-09 20:29:21 -08:00
|
|
|
as.List.Append(n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-03-10 11:49:20 -08:00
|
|
|
as.Rlist.Set(subst.list(n.List))
|
2016-03-20 08:03:31 -07:00
|
|
|
as = typecheck(as, Etop)
|
2016-03-08 15:10:26 -08:00
|
|
|
m.Ninit.Append(as)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2016-03-19 17:02:01 -07:00
|
|
|
typecheckslice(m.Ninit.Slice(), Etop)
|
2016-03-20 08:03:31 -07:00
|
|
|
m = typecheck(m, Etop)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
// dump("Return after substitution", m);
|
|
|
|
return m
|
|
|
|
|
2015-04-01 09:38:44 -07:00
|
|
|
case OGOTO, OLABEL:
|
2017-10-23 19:57:07 +01:00
|
|
|
m := n.copy()
|
2017-09-18 23:02:02 -07:00
|
|
|
m.Pos = subst.updatedPos(m.Pos)
|
2016-03-08 15:10:26 -08:00
|
|
|
m.Ninit.Set(nil)
|
2015-02-23 16:07:24 -05:00
|
|
|
p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
|
2016-09-15 15:45:10 +10:00
|
|
|
m.Left = newname(lookup(p))
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
return m
|
2017-02-17 16:07:47 -05:00
|
|
|
}
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-10-23 19:57:07 +01:00
|
|
|
m := n.copy()
|
2017-09-18 23:02:02 -07:00
|
|
|
m.Pos = subst.updatedPos(m.Pos)
|
2017-02-17 16:07:47 -05:00
|
|
|
m.Ninit.Set(nil)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2017-02-17 16:07:47 -05:00
|
|
|
if n.Op == OCLOSURE {
|
|
|
|
Fatalf("cannot inline function containing closure: %+v", n)
|
2016-03-23 16:01:15 +11:00
|
|
|
}
|
2017-02-17 16:07:47 -05:00
|
|
|
|
|
|
|
m.Left = subst.node(n.Left)
|
|
|
|
m.Right = subst.node(n.Right)
|
|
|
|
m.List.Set(subst.list(n.List))
|
|
|
|
m.Rlist.Set(subst.list(n.Rlist))
|
|
|
|
m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
|
|
|
|
m.Nbody.Set(subst.list(n.Nbody))
|
|
|
|
|
|
|
|
return m
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
2017-09-18 23:02:02 -07:00
|
|
|
func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
|
|
|
|
pos := Ctxt.PosTable.Pos(xpos)
|
cmd/compile,link: generate PC-value tables with inlining information
In order to generate accurate tracebacks, the runtime needs to know the
inlined call stack for a given PC. This creates two tables per function
for this purpose. The first table is the inlining tree (stored in the
function's funcdata), which has a node containing the file, line, and
function name for every inlined call. The second table is a PC-value
table that maps each PC to a node in the inlining tree (or -1 if the PC
is not the result of inlining).
To give the appearance that inlining hasn't happened, the runtime also
needs the original source position information of inlined AST nodes.
Previously the compiler plastered over the line numbers of inlined AST
nodes with the line number of the call. This meant that the PC-line
table mapped each PC to line number of the outermost call in its inlined
call stack, with no way to access the innermost line number.
Now the compiler retains line numbers of inlined AST nodes and writes
the innermost source position information to the PC-line and PC-file
tables. Some tools and tests expect to see outermost line numbers, so we
provide the OutermostLine function for displaying line info.
To keep track of the inlined call stack for an AST node, we extend the
src.PosBase type with an index into a global inlining tree. Every time
the compiler inlines a call, it creates a node in the global inlining
tree for the call, and writes its index to the PosBase of every inlined
AST node. The parent of this node is the inlining tree index of the
call. -1 signifies no parent.
For each function, the compiler creates a local inlining tree and a
PC-value table mapping each PC to an index in the local tree. These are
written to an object file, which is read by the linker. The linker
re-encodes these tables compactly by deduplicating function names and
file names.
This change increases the size of binaries by 4-5%. For example, this is
how the go1 benchmark binary is impacted by this change:
section old bytes new bytes delta
.text 3.49M ± 0% 3.49M ± 0% +0.06%
.rodata 1.12M ± 0% 1.21M ± 0% +8.21%
.gopclntab 1.50M ± 0% 1.68M ± 0% +11.89%
.debug_line 338k ± 0% 435k ± 0% +28.78%
Total 9.21M ± 0% 9.58M ± 0% +4.01%
Updates #19348.
Change-Id: Ic4f180c3b516018138236b0c35e0218270d957d3
Reviewed-on: https://go-review.googlesource.com/37231
Run-TryBot: David Lazar <lazard@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
2017-02-17 12:28:05 -05:00
|
|
|
oldbase := pos.Base() // can be nil
|
2017-09-18 23:02:02 -07:00
|
|
|
newbase := subst.bases[oldbase]
|
2017-02-17 12:28:05 -05:00
|
|
|
if newbase == nil {
|
2017-09-18 23:02:02 -07:00
|
|
|
newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
|
|
|
|
subst.bases[oldbase] = newbase
|
2017-02-17 12:28:05 -05:00
|
|
|
}
|
|
|
|
pos.SetBase(newbase)
|
|
|
|
return Ctxt.PosTable.XPos(pos)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2018-05-23 15:31:52 -04:00
|
|
|
|
|
|
|
func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
|
|
|
|
s := make([]*Node, 0, len(ll))
|
|
|
|
for _, n := range ll {
|
|
|
|
if n.Class() == PAUTO {
|
|
|
|
if _, found := vis.usedLocals[n]; !found {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s = append(s, n)
|
|
|
|
}
|
|
|
|
return s
|
|
|
|
}
|