2015-02-13 14:40:36 -05:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
|
|
package gc
|
|
|
|
|
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
import (
|
2020-11-19 20:49:23 -05:00
|
|
|
"cmd/compile/internal/base"
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
"cmd/compile/internal/types"
|
2020-06-30 07:55:16 -04:00
|
|
|
"cmd/internal/obj"
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
"fmt"
|
2020-11-13 23:36:48 -08:00
|
|
|
"go/constant"
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-03-11 00:44:39 -08:00
|
|
|
// An InitEntry describes one non-zero slot of a statically
// initialized composite value: the byte offset of the slot and the
// expression whose value belongs there.
type InitEntry struct {
	Xoffset int64 // struct, array only
	Expr    *Node // bytes of run-time computed expressions
}
|
|
|
|
|
|
|
|
|
|
// An InitPlan records the set of entries needed to statically
// initialize a composite literal (see InitEntry).
type InitPlan struct {
	E []InitEntry
}
|
|
|
|
|
|
2019-03-28 14:35:49 -07:00
|
|
|
// An InitSchedule is used to decompose assignment statements into
// static and dynamic initialization parts. Static initializations are
// handled by populating variables' linker symbol data, while dynamic
// initializations are accumulated to be executed in order.
type InitSchedule struct {
	// out is the ordered list of dynamic initialization
	// statements.
	out []*Node

	// initplans caches the InitPlan (offset/expression breakdown)
	// computed for each composite literal the schedule has seen.
	initplans map[*Node]*InitPlan
	// inittemps maps composite-literal expressions to the static
	// temporaries allocated to hold their underlying data.
	inittemps map[*Node]*Node
}
|
|
|
|
|
|
|
|
|
|
// append schedules n for dynamic (run-time) initialization by adding
// it to the ordered output list.
func (s *InitSchedule) append(n *Node) {
	s.out = append(s.out, n)
}
|
|
|
|
|
|
2019-03-28 14:35:49 -07:00
|
|
|
// staticInit adds an initialization statement n to the schedule.
|
|
|
|
|
func (s *InitSchedule) staticInit(n *Node) {
|
|
|
|
|
if !s.tryStaticInit(n) {
|
2020-11-19 20:49:23 -05:00
|
|
|
if base.Flag.Percent != 0 {
|
2019-03-28 14:35:49 -07:00
|
|
|
Dump("nonstatic", n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2019-03-28 14:35:49 -07:00
|
|
|
s.append(n)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-28 14:35:49 -07:00
|
|
|
// tryStaticInit attempts to statically execute an initialization
|
|
|
|
|
// statement and reports whether it succeeded.
|
|
|
|
|
func (s *InitSchedule) tryStaticInit(n *Node) bool {
|
|
|
|
|
// Only worry about simple "l = r" assignments. Multiple
|
|
|
|
|
// variable/expression OAS2 assignments have already been
|
|
|
|
|
// replaced by multiple simple OAS assignments, and the other
|
|
|
|
|
// OAS2* assignments mostly necessitate dynamic execution
|
|
|
|
|
// anyway.
|
|
|
|
|
if n.Op != OAS {
|
|
|
|
|
return false
|
2019-03-25 12:35:42 -07:00
|
|
|
}
|
2019-03-28 14:35:49 -07:00
|
|
|
if n.Left.isBlank() && candiscard(n.Right) {
|
|
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2019-03-28 14:35:49 -07:00
|
|
|
lno := setlineno(n)
|
2020-11-19 20:49:23 -05:00
|
|
|
defer func() { base.Pos = lno }()
|
2019-03-28 14:35:49 -07:00
|
|
|
return s.staticassign(n.Left, n.Right)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// staticcopy is like staticassign but we are copying an already
// initialized value r (a named global) into l. It reports whether the
// copy was fully performed at compile time; if it returns false the
// caller must fall back to dynamic initialization.
func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
	if r.Op != ONAME && r.Op != OMETHEXPR {
		return false
	}
	if r.Class() == PFUNC {
		// Function value: emit a static reference to its symbol.
		pfuncsym(l, r)
		return true
	}
	if r.Class() != PEXTERN || r.Sym.Pkg != localpkg {
		// Only package-local globals have an initializer we can see.
		return false
	}
	if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
		return false
	}
	if r.Name.Defn.Op != OAS {
		return false
	}
	if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
		return false
	}
	// Keep the original named node: if an entry of a composite
	// needs run-time computation below, we copy from orig rather
	// than from the (possibly unwrapped) r.
	orig := r
	r = r.Name.Defn.Right

	// Skip identity-preserving conversions, but stop once the types
	// match so the assignment below stays well-typed.
	for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
		r = r.Left
	}

	switch r.Op {
	case ONAME, OMETHEXPR:
		// r is itself a copy of another global; chase it.
		if s.staticcopy(l, r) {
			return true
		}
		// We may have skipped past one or more OCONVNOPs, so
		// use conv to ensure r is assignable to l (#13263).
		s.append(nod(OAS, l, conv(r, l.Type)))
		return true

	case ONIL:
		// Zero value; symbol data is already zeroed.
		return true

	case OLITERAL:
		if isZero(r) {
			// Zero value; nothing to write.
			return true
		}
		litsym(l, r, int(l.Type.Width))
		return true

	case OADDR:
		if a := r.Left; a.Op == ONAME {
			// Address of a named global: a static relocation suffices.
			addrsym(l, a)
			return true
		}

	case OPTRLIT:
		switch r.Left.Op {
		case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
			// copy pointer
			addrsym(l, s.inittemps[r])
			return true
		}

	case OSLICELIT:
		// copy slice
		a := s.inittemps[r]
		slicesym(l, a, r.Right.Int64Val())
		return true

	case OARRAYLIT, OSTRUCTLIT:
		// Copy the composite entry by entry, following the plan
		// recorded when r itself was scheduled.
		p := s.initplans[r]

		n := l.copy()
		for i := range p.E {
			e := &p.E[i]
			// Retarget n at the corresponding slot of l.
			n.Xoffset = l.Xoffset + e.Xoffset
			n.Type = e.Expr.Type
			if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL {
				litsym(n, e.Expr, int(n.Type.Width))
				continue
			}
			ll := n.sepcopy()
			if s.staticcopy(ll, e.Expr) {
				continue
			}
			// Requires computation, but we're
			// copying someone else's computation.
			rr := orig.sepcopy()
			rr.Type = ll.Type
			rr.Xoffset += e.Xoffset
			setlineno(rr)
			s.append(nod(OAS, ll, rr))
		}

		return true
	}

	return false
}
|
|
|
|
|
|
2019-03-25 12:32:41 -07:00
|
|
|
func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
|
2015-06-29 12:49:25 -04:00
|
|
|
for r.Op == OCONVNOP {
|
|
|
|
|
r = r.Left
|
|
|
|
|
}
|
|
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
switch r.Op {
|
2020-11-18 11:25:29 -05:00
|
|
|
case ONAME, OMETHEXPR:
|
2019-03-25 12:32:41 -07:00
|
|
|
return s.staticcopy(l, r)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2020-11-13 20:38:21 -08:00
|
|
|
case ONIL:
|
|
|
|
|
return true
|
|
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
case OLITERAL:
|
2018-04-08 13:39:10 +01:00
|
|
|
if isZero(r) {
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2020-04-11 06:52:09 -07:00
|
|
|
litsym(l, r, int(l.Type.Width))
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
case OADDR:
|
2020-11-22 12:09:08 -05:00
|
|
|
if nam := stataddr(r.Left); nam != nil {
|
|
|
|
|
addrsym(l, nam)
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
fallthrough
|
|
|
|
|
|
|
|
|
|
case OPTRLIT:
|
|
|
|
|
switch r.Left.Op {
|
2016-06-19 07:20:28 -07:00
|
|
|
case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
// Init pointer.
|
2016-06-20 08:18:22 -07:00
|
|
|
a := staticname(r.Left.Type)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2019-03-25 12:35:42 -07:00
|
|
|
s.inittemps[r] = a
|
2020-04-11 06:52:09 -07:00
|
|
|
addrsym(l, a)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
// Init underlying literal.
|
2019-03-25 12:32:41 -07:00
|
|
|
if !s.staticassign(a, r.Left) {
|
|
|
|
|
s.append(nod(OAS, a, r.Left))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
//dump("not static ptrlit", r);
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2018-11-18 08:34:38 -08:00
|
|
|
case OSTR2BYTES:
|
2017-04-25 18:14:12 -07:00
|
|
|
if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
|
2020-10-12 15:02:59 +02:00
|
|
|
sval := r.Left.StringVal()
|
2020-04-10 21:33:27 -07:00
|
|
|
slicebytes(l, sval)
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-06-19 07:20:28 -07:00
|
|
|
case OSLICELIT:
|
2019-03-25 12:35:42 -07:00
|
|
|
s.initplan(r)
|
2016-06-19 07:20:28 -07:00
|
|
|
// Init slice.
|
2020-10-12 15:02:59 +02:00
|
|
|
bound := r.Right.Int64Val()
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
ta := types.NewArray(r.Type.Elem(), bound)
|
2020-04-14 08:16:51 -07:00
|
|
|
ta.SetNoalg(true)
|
2016-06-20 08:18:22 -07:00
|
|
|
a := staticname(ta)
|
2019-03-25 12:35:42 -07:00
|
|
|
s.inittemps[r] = a
|
2020-04-08 13:51:25 -07:00
|
|
|
slicesym(l, a, bound)
|
2016-06-19 07:20:28 -07:00
|
|
|
// Fall through to init underlying array.
|
|
|
|
|
l = a
|
2015-02-13 14:40:36 -05:00
|
|
|
fallthrough
|
|
|
|
|
|
2016-06-19 07:20:28 -07:00
|
|
|
case OARRAYLIT, OSTRUCTLIT:
|
2019-03-25 12:35:42 -07:00
|
|
|
s.initplan(r)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2019-03-25 12:35:42 -07:00
|
|
|
p := s.initplans[r]
|
2017-10-23 19:57:07 +01:00
|
|
|
n := l.copy()
|
2015-09-08 22:22:44 +02:00
|
|
|
for i := range p.E {
|
|
|
|
|
e := &p.E[i]
|
|
|
|
|
n.Xoffset = l.Xoffset + e.Xoffset
|
|
|
|
|
n.Type = e.Expr.Type
|
2020-11-13 20:38:21 -08:00
|
|
|
if e.Expr.Op == OLITERAL || e.Expr.Op == ONIL {
|
2020-04-11 06:52:09 -07:00
|
|
|
litsym(n, e.Expr, int(n.Type.Width))
|
2018-04-03 13:17:28 +01:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
setlineno(e.Expr)
|
2018-09-20 15:22:33 -07:00
|
|
|
a := n.sepcopy()
|
2019-03-25 12:32:41 -07:00
|
|
|
if !s.staticassign(a, e.Expr) {
|
|
|
|
|
s.append(nod(OAS, a, e.Expr))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
case OMAPLIT:
|
|
|
|
|
break
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
|
|
|
|
|
case OCLOSURE:
|
cmd/compile: ignore OXXX nodes in closure captured vars list
Added a debug flag "-d closure" to explain compilation of
closures (should this be done some other way? Should we
rewrite the "-m" flag to "-d escapes"?) Used this to
discover that cause was an OXXX node in the captured vars
list, and in turn noticed that OXXX nodes are explicitly
ignored in all other processing of captured variables.
Couldn't figure out a reproducer, did verify that this OXXX
was not caused by an unnamed return value (which is one use
of these). Verified lack of heap allocation by examining -S
output.
Assembly:
(runtime/mgc.go:1371) PCDATA $0, $2
(runtime/mgc.go:1371) CALL "".notewakeup(SB)
(runtime/mgc.go:1377) LEAQ "".gcBgMarkWorker.func1·f(SB), AX
(runtime/mgc.go:1404) MOVQ AX, (SP)
(runtime/mgc.go:1404) MOVQ "".autotmp_2242+88(SP), CX
(runtime/mgc.go:1404) MOVQ CX, 8(SP)
(runtime/mgc.go:1404) LEAQ go.string."GC worker (idle)"(SB), AX
(runtime/mgc.go:1404) MOVQ AX, 16(SP)
(runtime/mgc.go:1404) MOVQ $16, 24(SP)
(runtime/mgc.go:1404) MOVB $20, 32(SP)
(runtime/mgc.go:1404) MOVQ $0, 40(SP)
(runtime/mgc.go:1404) PCDATA $0, $2
(runtime/mgc.go:1404) CALL "".gopark(SB)
Added a check for compiling_runtime to ensure that this is
caught in the future. Added a test to test the check.
Verified that 1.5.3 did NOT reject the test case when
compiled with -+ flag, so this is not a recently added bug.
Cause of bug is two-part -- there was no leaking closure
detection ever, and instead it relied on capture-of-variables
to trigger compiling_runtime test, but closures improved in
1.5.3 so that mere capture of a value did not also capture
the variable, which thus allowed closures to escape, as well
as this case where the escape was spurious. In
fixedbugs/issue14999.go, compare messages for f and g;
1.5.3 would reject g, but not f. 1.4 rejects both because
1.4 heap-allocates parameter x for both.
Fixes #14999.
Change-Id: I40bcdd27056810628e96763a44f2acddd503aee1
Reviewed-on: https://go-review.googlesource.com/21322
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-03-30 14:14:00 -04:00
|
|
|
if hasemptycvars(r) {
|
2020-11-19 20:49:23 -05:00
|
|
|
if base.Debug.Closure > 0 {
|
|
|
|
|
base.WarnfAt(r.Pos, "closure converted to global")
|
cmd/compile: ignore OXXX nodes in closure captured vars list
Added a debug flag "-d closure" to explain compilation of
closures (should this be done some other way? Should we
rewrite the "-m" flag to "-d escapes"?) Used this to
discover that cause was an OXXX node in the captured vars
list, and in turn noticed that OXXX nodes are explicitly
ignored in all other processing of captured variables.
Couldn't figure out a reproducer, did verify that this OXXX
was not caused by an unnamed return value (which is one use
of these). Verified lack of heap allocation by examining -S
output.
Assembly:
(runtime/mgc.go:1371) PCDATA $0, $2
(runtime/mgc.go:1371) CALL "".notewakeup(SB)
(runtime/mgc.go:1377) LEAQ "".gcBgMarkWorker.func1·f(SB), AX
(runtime/mgc.go:1404) MOVQ AX, (SP)
(runtime/mgc.go:1404) MOVQ "".autotmp_2242+88(SP), CX
(runtime/mgc.go:1404) MOVQ CX, 8(SP)
(runtime/mgc.go:1404) LEAQ go.string."GC worker (idle)"(SB), AX
(runtime/mgc.go:1404) MOVQ AX, 16(SP)
(runtime/mgc.go:1404) MOVQ $16, 24(SP)
(runtime/mgc.go:1404) MOVB $20, 32(SP)
(runtime/mgc.go:1404) MOVQ $0, 40(SP)
(runtime/mgc.go:1404) PCDATA $0, $2
(runtime/mgc.go:1404) CALL "".gopark(SB)
Added a check for compiling_runtime to ensure that this is
caught in the future. Added a test to test the check.
Verified that 1.5.3 did NOT reject the test case when
compiled with -+ flag, so this is not a recently added bug.
Cause of bug is two-part -- there was no leaking closure
detection ever, and instead it relied on capture-of-variables
to trigger compiling_runtime test, but closures improved in
1.5.3 so that mere capture of a value did not also capture
the variable, which thus allowed closures to escape, as well
as this case where the escape was spurious. In
fixedbugs/issue14999.go, compare messages for f and g;
1.5.3 would reject g, but not f. 1.4 rejects both because
1.4 heap-allocates parameter x for both.
Fixes #14999.
Change-Id: I40bcdd27056810628e96763a44f2acddd503aee1
Reviewed-on: https://go-review.googlesource.com/21322
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-03-30 14:14:00 -04:00
|
|
|
}
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
// Closures with no captured variables are globals,
|
|
|
|
|
// so the assignment can be done at link time.
|
[dev.regabi] cmd/compile: clean up Node.Func
The original meaning of type Func was "extra fields factored out
of a few cases of type Node having to do with functions",
but those specific cases didn't necessarily have any relation.
A typical declared function is represented by an ODCLFUNC Node
at its declaration and an ONAME node at its uses, and both those
have a .Func field, but they are *different* Funcs.
Similarly, a closure is represented both by an OCLOSURE Node for
the value itself and an ODCLFUNC Node for the underlying function
implementing the closure. Those too have *different* Funcs,
and the Func.Closure field in one points to the other and vice versa.
This has led to no end of confusion over the years.
This CL elevates type Func to be the canonical identifier for
a given Go function.
This looks like a trivial CL but in fact is the result of a lot of
scaffolding and rewriting, discarded once the result was achieved, to
separate out the three different kinds of Func nodes into three
separate fields, limited in use to each specific Node type, to
understand which Func fields are used by which Node types and what the
possible overlaps are. There were a few overlaps, most notably around
closures, which led to more fields being added to type Func to keep
them separate even though there is now a single Func instead of two
different ones for each function.
A future CL can and should change Curfn to be a *Func instead of
a *Node, finally eliminating the confusion about whether Curfn
is an ODCLFUNC node (as it is most of the time) or an ONAME node
(as it is when type-checking an inlined function body).
Although sizeof_test.go makes it look like Func is growing by two
words, there are now half as many Funcs in a running compilation,
so the memory footprint has actually been reduced substantially.
Change-Id: I598bd96c95728093dc769a835d48f2154a406a61
Reviewed-on: https://go-review.googlesource.com/c/go/+/272253
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-11-16 17:00:10 -05:00
|
|
|
pfuncsym(l, r.Func.Nname)
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
return true
|
|
|
|
|
}
|
2017-08-10 23:41:17 +09:00
|
|
|
closuredebugruntimecheck(r)
|
2016-05-12 17:22:47 -07:00
|
|
|
|
|
|
|
|
case OCONVIFACE:
|
|
|
|
|
// This logic is mirrored in isStaticCompositeLiteral.
|
|
|
|
|
// If you change something here, change it there, and vice versa.
|
|
|
|
|
|
|
|
|
|
// Determine the underlying concrete type and value we are converting from.
|
|
|
|
|
val := r
|
|
|
|
|
for val.Op == OCONVIFACE {
|
|
|
|
|
val = val.Left
|
|
|
|
|
}
|
2020-11-13 20:38:21 -08:00
|
|
|
|
2016-05-12 17:22:47 -07:00
|
|
|
if val.Type.IsInterface() {
|
|
|
|
|
// val is an interface type.
|
|
|
|
|
// If val is nil, we can statically initialize l;
|
|
|
|
|
// both words are zero and so there no work to do, so report success.
|
|
|
|
|
// If val is non-nil, we have no concrete type to record,
|
|
|
|
|
// and we won't be able to statically initialize its value, so report failure.
|
2020-11-13 20:38:21 -08:00
|
|
|
return val.Op == ONIL
|
2016-05-12 17:22:47 -07:00
|
|
|
}
|
|
|
|
|
|
2020-09-20 23:29:20 -04:00
|
|
|
markTypeUsedInInterface(val.Type, l.Sym.Linksym())
|
[dev.link] cmd/compile, cmd/link: remove dead methods if type is not used in interface
Currently, a method of a reachable type is live if it matches a
method of a reachable interface. In fact, we only need to retain
the method if the type is actually converted to an interface. If
the type is never converted to an interface, there is no way to
call the method through an interface method call (but the type
descriptor could still be used, e.g. in calling
runtime.newobject).
A type can be used in an interface in two ways:
- directly converted to interface. (Any interface counts, as it
is possible to convert one interface to another.)
- obtained by reflection from a related type (e.g. obtaining an
interface of T from []T).
For the former, we let the compiler emit a marker on the type
descriptor symbol when it is converted to an interface. In the
linker, we only need to check methods of marked types.
For the latter, when the linker visits a marked type, it needs to
visit all its "child" types as marked (i.e. potentially could be
converted to interface).
This reduces binary size:
cmd/compile 18792016 18706096 (-0.5%)
cmd/go 14120572 13398948 (-5.1%)
Change-Id: I4465c7eeabf575f4dc84017214c610fa05ae31fd
Reviewed-on: https://go-review.googlesource.com/c/go/+/237298
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Than McIntosh <thanm@google.com>
Reviewed-by: Jeremy Faller <jeremy@golang.org>
2020-06-08 18:38:59 -04:00
|
|
|
|
2016-05-12 17:22:47 -07:00
|
|
|
var itab *Node
|
|
|
|
|
if l.Type.IsEmptyInterface() {
|
|
|
|
|
itab = typename(val.Type)
|
|
|
|
|
} else {
|
|
|
|
|
itab = itabname(val.Type, l.Type)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Create a copy of l to modify while we emit data.
|
2018-04-05 14:45:38 +01:00
|
|
|
n := l.copy()
|
2016-05-12 17:22:47 -07:00
|
|
|
|
|
|
|
|
// Emit itab, advance offset.
|
2020-04-11 06:52:09 -07:00
|
|
|
addrsym(n, itab.Left) // itab is an OADDR node
|
2016-05-12 17:22:47 -07:00
|
|
|
n.Xoffset += int64(Widthptr)
|
|
|
|
|
|
|
|
|
|
// Emit data.
|
|
|
|
|
if isdirectiface(val.Type) {
|
2020-11-13 20:38:21 -08:00
|
|
|
if val.Op == ONIL {
|
2016-05-12 17:22:47 -07:00
|
|
|
// Nil is zero, nothing to do.
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
// Copy val directly into n.
|
|
|
|
|
n.Type = val.Type
|
|
|
|
|
setlineno(val)
|
2018-09-20 15:22:33 -07:00
|
|
|
a := n.sepcopy()
|
2019-03-25 12:32:41 -07:00
|
|
|
if !s.staticassign(a, val) {
|
|
|
|
|
s.append(nod(OAS, a, val))
|
2016-05-12 17:22:47 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// Construct temp to hold val, write pointer to temp into n.
|
|
|
|
|
a := staticname(val.Type)
|
2019-03-25 12:35:42 -07:00
|
|
|
s.inittemps[val] = a
|
2019-03-25 12:32:41 -07:00
|
|
|
if !s.staticassign(a, val) {
|
|
|
|
|
s.append(nod(OAS, a, val))
|
2016-05-12 17:22:47 -07:00
|
|
|
}
|
2020-04-11 06:52:09 -07:00
|
|
|
addrsym(n, a)
|
2016-05-12 17:22:47 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
cmd/compile: captureless closures are constants
In particular, we can initialize globals with them at link time instead
of generating code for them in an init() function. Less code, less
startup cost.
But the real reason for this change is binary size. This change reduces
the binary size of hello world by ~4%.
The culprit is fmt.ssFree, a global variable which is a sync.Pool of
scratch scan states. It is initalized with a captureless closure as the
pool's New action. That action in turn references all the scanf code.
If you never call any of the fmt.Scanf* routines, ssFree is never used.
But before this change, ssFree is still referenced by fmt's init
function. That keeps ssFree and all the code it references in the
binary. With this change, ssFree is initialized at link time. As a
result, fmt.init never mentions ssFree. If you don't call fmt.Scanf*,
ssFree is unreferenced and it and the scanf code are not included.
This change is an easy fix for what is generally a much harder problem,
the unnecessary initializing of unused globals (and retention of code
that they reference). Ideally we should have separate init code for
each global and only include that code if the corresponding global is
live. (We'd need to make sure that the initializing code has no side
effects, except on the global being initialized.) That is a much harder
change.
Update #6853
Change-Id: I19d1e33992287882c83efea6ce113b7cfc504b67
Reviewed-on: https://go-review.googlesource.com/17398
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-12-03 13:20:58 -08:00
|
|
|
//dump("not static", r);
|
2015-02-17 22:13:49 -05:00
|
|
|
return false
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-06-19 14:12:59 -07:00
|
|
|
// initContext records where static data is being populated: inside an
// init function, or inside any other function.
//
// Data populated in an init function is written either zero times (it
// becomes a readonly static data symbol) or exactly once, while the init
// function runs. Either way there is no opportunity for races or later
// modification, so it may live in a (possibly readonly) data symbol.
//
// Data populated anywhere else must be local to its function, so that
// concurrent executions of that function do not clobber each other.
type initContext uint8

const (
	inInitFunction initContext = iota
	inNonInitFunction
)

// String returns a human-readable name for c, for debug output.
func (c initContext) String() string {
	switch c {
	case inInitFunction:
		return "inInitFunction"
	default:
		return "inNonInitFunction"
	}
}
|
|
|
|
|
|
2015-10-22 09:51:12 +09:00
|
|
|
// from here down is the walk analysis
|
|
|
|
|
// of composite literals.
|
|
|
|
|
// most of the work is to generate
|
|
|
|
|
// data statements for the constant
|
|
|
|
|
// part of the composite literal.
|
2016-06-20 08:18:22 -07:00
|
|
|
|
2017-03-28 10:36:18 -07:00
|
|
|
var statuniqgen int // name generator for static temps
|
|
|
|
|
|
2020-07-15 20:01:32 -04:00
|
|
|
// staticname returns a name backed by a (writable) static data symbol.
|
|
|
|
|
// Use readonlystaticname for read-only node.
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
func staticname(t *types.Type) *Node {
|
2017-03-31 16:04:52 -07:00
|
|
|
// Don't use lookupN; it interns the resulting string, but these are all unique.
|
2020-06-30 07:55:16 -04:00
|
|
|
n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
|
2015-02-13 14:40:36 -05:00
|
|
|
statuniqgen++
|
|
|
|
|
addvar(n, t, PEXTERN)
|
2020-07-15 20:01:32 -04:00
|
|
|
n.Sym.Linksym().Set(obj.AttrLocal, true)
|
|
|
|
|
return n
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// readonlystaticname returns a name backed by a (writable) static data symbol.
|
|
|
|
|
func readonlystaticname(t *types.Type) *Node {
|
|
|
|
|
n := staticname(t)
|
|
|
|
|
n.MarkReadonly()
|
|
|
|
|
n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
|
2015-02-13 14:40:36 -05:00
|
|
|
return n
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-14 14:03:57 -07:00
|
|
|
func (n *Node) isSimpleName() bool {
|
2020-11-18 11:25:29 -05:00
|
|
|
return (n.Op == ONAME || n.Op == OMETHEXPR) && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-03-07 22:54:46 -08:00
|
|
|
func litas(l *Node, r *Node, init *Nodes) {
|
2016-09-16 11:00:54 +10:00
|
|
|
a := nod(OAS, l, r)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
a = walkexpr(a, init)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-03-13 17:06:10 -07:00
|
|
|
// initGenType is a bitmap describing what kinds of generation a static
// value will need.
type initGenType uint8

const (
	initDynamic initGenType = 1 << iota // has dynamic values, requiring generated init code
	initConst                           // has constant values, which may be written into data symbols
)
|
|
|
|
|
|
2016-05-16 14:28:44 -07:00
|
|
|
// getdyn calculates the initGenType for n.
|
|
|
|
|
// If top is false, getdyn is recursing.
|
|
|
|
|
func getdyn(n *Node, top bool) initGenType {
|
2015-02-13 14:40:36 -05:00
|
|
|
switch n.Op {
|
|
|
|
|
default:
|
2020-10-27 17:08:57 +07:00
|
|
|
if n.isGoConst() {
|
2016-03-13 17:06:10 -07:00
|
|
|
return initConst
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-03-13 17:06:10 -07:00
|
|
|
return initDynamic
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-06-19 07:20:28 -07:00
|
|
|
case OSLICELIT:
|
|
|
|
|
if !top {
|
2016-03-13 17:06:10 -07:00
|
|
|
return initDynamic
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2020-10-12 15:02:59 +02:00
|
|
|
if n.Right.Int64Val()/4 > int64(n.List.Len()) {
|
2018-11-26 14:33:32 -08:00
|
|
|
// <25% of entries have explicit values.
|
|
|
|
|
// Very rough estimation, it takes 4 bytes of instructions
|
|
|
|
|
// to initialize 1 byte of result. So don't use a static
|
|
|
|
|
// initializer if the dynamic initialization code would be
|
|
|
|
|
// smaller than the static value.
|
|
|
|
|
// See issue 23780.
|
|
|
|
|
return initDynamic
|
|
|
|
|
}
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-06-19 07:20:28 -07:00
|
|
|
case OARRAYLIT, OSTRUCTLIT:
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-03-13 17:06:10 -07:00
|
|
|
|
|
|
|
|
var mode initGenType
|
2016-03-08 15:10:26 -08:00
|
|
|
for _, n1 := range n.List.Slice() {
|
2016-10-27 02:02:30 -07:00
|
|
|
switch n1.Op {
|
|
|
|
|
case OKEY:
|
|
|
|
|
n1 = n1.Right
|
|
|
|
|
case OSTRUCTKEY:
|
|
|
|
|
n1 = n1.Left
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
mode |= getdyn(n1, false)
|
2016-03-13 17:06:10 -07:00
|
|
|
if mode == initDynamic|initConst {
|
2015-02-13 14:40:36 -05:00
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return mode
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-18 11:17:55 -07:00
|
|
|
// isStaticCompositeLiteral reports whether n is a compile-time constant.
|
|
|
|
|
func isStaticCompositeLiteral(n *Node) bool {
|
2016-04-19 12:08:33 -07:00
|
|
|
switch n.Op {
|
2016-06-19 07:20:28 -07:00
|
|
|
case OSLICELIT:
|
|
|
|
|
return false
|
2016-10-12 15:48:18 -07:00
|
|
|
case OARRAYLIT:
|
2016-06-19 07:20:28 -07:00
|
|
|
for _, r := range n.List.Slice() {
|
2016-10-27 02:02:30 -07:00
|
|
|
if r.Op == OKEY {
|
|
|
|
|
r = r.Right
|
2016-06-19 07:20:28 -07:00
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
if !isStaticCompositeLiteral(r) {
|
2016-06-19 07:20:28 -07:00
|
|
|
return false
|
|
|
|
|
}
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
|
|
|
|
return true
|
|
|
|
|
case OSTRUCTLIT:
|
|
|
|
|
for _, r := range n.List.Slice() {
|
|
|
|
|
if r.Op != OSTRUCTKEY {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
|
|
|
|
if !isStaticCompositeLiteral(r.Left) {
|
2016-06-19 07:20:28 -07:00
|
|
|
return false
|
|
|
|
|
}
|
2016-04-19 12:08:33 -07:00
|
|
|
}
|
2016-06-19 07:20:28 -07:00
|
|
|
return true
|
2020-11-13 20:38:21 -08:00
|
|
|
case OLITERAL, ONIL:
|
2016-04-19 12:08:33 -07:00
|
|
|
return true
|
2016-05-12 17:22:47 -07:00
|
|
|
case OCONVIFACE:
|
|
|
|
|
// See staticassign's OCONVIFACE case for comments.
|
|
|
|
|
val := n
|
|
|
|
|
for val.Op == OCONVIFACE {
|
|
|
|
|
val = val.Left
|
|
|
|
|
}
|
|
|
|
|
if val.Type.IsInterface() {
|
2020-11-13 20:38:21 -08:00
|
|
|
return val.Op == ONIL
|
2016-05-12 17:22:47 -07:00
|
|
|
}
|
2020-11-13 20:38:21 -08:00
|
|
|
if isdirectiface(val.Type) && val.Op == ONIL {
|
2016-05-12 17:22:47 -07:00
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
return isStaticCompositeLiteral(val)
|
2016-04-18 11:17:55 -07:00
|
|
|
}
|
2016-06-19 07:20:28 -07:00
|
|
|
return false
|
2016-04-18 11:17:55 -07:00
|
|
|
}
|
|
|
|
|
|
2016-06-19 14:12:59 -07:00
|
|
|
// initKind classifies a pass of static initialization:
//
//   - static: literals and literal components of composite literals,
//     which can be written directly into data symbols;
//   - dynamic: non-literals and non-literal components, which need
//     generated init code;
//   - local code: initialization emitted purely in generated code,
//     local to the function of use.
//
// Initialization code is sometimes generated in passes,
// first static then dynamic.
type initKind uint8

const (
	initKindStatic initKind = iota + 1
	initKindDynamic
	initKindLocalCode
)
|
|
|
|
|
|
2016-06-19 12:11:47 -07:00
|
|
|
// fixedlit handles struct, array, and slice literals.
|
|
|
|
|
// TODO: expand documentation.
|
2016-06-19 14:12:59 -07:00
|
|
|
func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
|
2020-05-07 00:35:28 +07:00
|
|
|
isBlank := var_ == nblank
|
2016-10-12 15:48:18 -07:00
|
|
|
var splitnode func(*Node) (a *Node, value *Node)
|
2016-06-19 12:11:47 -07:00
|
|
|
switch n.Op {
|
|
|
|
|
case OARRAYLIT, OSLICELIT:
|
2016-10-27 02:02:30 -07:00
|
|
|
var k int64
|
2016-10-12 15:48:18 -07:00
|
|
|
splitnode = func(r *Node) (*Node, *Node) {
|
2016-10-27 02:02:30 -07:00
|
|
|
if r.Op == OKEY {
|
2018-11-28 14:34:45 -08:00
|
|
|
k = indexconst(r.Left)
|
|
|
|
|
if k < 0 {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("fixedlit: invalid index %v", r.Left)
|
2018-11-28 14:34:45 -08:00
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
r = r.Right
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
a := nod(OINDEX, var_, nodintconst(k))
|
|
|
|
|
k++
|
2020-05-07 00:35:28 +07:00
|
|
|
if isBlank {
|
|
|
|
|
a = nblank
|
|
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
return a, r
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
2016-06-19 12:11:47 -07:00
|
|
|
case OSTRUCTLIT:
|
2016-10-12 15:48:18 -07:00
|
|
|
splitnode = func(r *Node) (*Node, *Node) {
|
|
|
|
|
if r.Op != OSTRUCTKEY {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
|
2016-10-12 15:48:18 -07:00
|
|
|
}
|
2020-05-07 00:35:28 +07:00
|
|
|
if r.Sym.IsBlank() || isBlank {
|
2017-03-09 22:41:32 -08:00
|
|
|
return nblank, r.Left
|
|
|
|
|
}
|
2019-01-04 17:34:33 -08:00
|
|
|
setlineno(r)
|
2016-10-12 15:48:18 -07:00
|
|
|
return nodSym(ODOT, var_, r.Sym), r.Left
|
|
|
|
|
}
|
2016-06-19 12:11:47 -07:00
|
|
|
default:
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("fixedlit bad op: %v", n.Op)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-03-08 15:10:26 -08:00
|
|
|
for _, r := range n.List.Slice() {
|
2016-10-12 15:48:18 -07:00
|
|
|
a, value := splitnode(r)
|
2020-04-28 00:41:02 +07:00
|
|
|
if a == nblank && candiscard(value) {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2015-09-08 22:22:44 +02:00
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
switch value.Op {
|
2016-06-19 07:20:28 -07:00
|
|
|
case OSLICELIT:
|
2016-06-19 14:12:59 -07:00
|
|
|
if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
|
2016-06-19 07:20:28 -07:00
|
|
|
slicelit(ctxt, value, a, init)
|
2016-06-19 12:11:47 -07:00
|
|
|
continue
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-06-19 12:11:47 -07:00
|
|
|
case OARRAYLIT, OSTRUCTLIT:
|
2016-06-19 14:12:59 -07:00
|
|
|
fixedlit(ctxt, kind, value, a, init)
|
2015-02-13 14:40:36 -05:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-27 17:08:57 +07:00
|
|
|
islit := value.isGoConst()
|
2016-06-19 14:12:59 -07:00
|
|
|
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
|
2015-02-13 14:40:36 -05:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-19 12:11:47 -07:00
|
|
|
// build list of assignments: var[index] = expr
|
2019-01-04 17:34:33 -08:00
|
|
|
setlineno(a)
|
2016-10-12 15:48:18 -07:00
|
|
|
a = nod(OAS, a, value)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
2016-06-19 14:12:59 -07:00
|
|
|
switch kind {
|
|
|
|
|
case initKindStatic:
|
2017-02-15 21:16:49 -08:00
|
|
|
genAsStatic(a)
|
2016-06-19 14:12:59 -07:00
|
|
|
case initKindDynamic, initKindLocalCode:
|
2018-10-05 08:54:50 -07:00
|
|
|
a = orderStmtInPlace(a, map[string][]*Node{})
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
a = walkstmt(a)
|
2017-02-15 21:16:49 -08:00
|
|
|
init.Append(a)
|
2016-06-19 14:12:59 -07:00
|
|
|
default:
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("fixedlit: bad kind %d", kind)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-28 23:30:08 +07:00
|
|
|
func isSmallSliceLit(n *Node) bool {
|
|
|
|
|
if n.Op != OSLICELIT {
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
r := n.Right
|
|
|
|
|
|
2020-10-12 15:02:59 +02:00
|
|
|
return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
|
2019-09-28 23:30:08 +07:00
|
|
|
}
|
|
|
|
|
|
2016-06-19 14:12:59 -07:00
|
|
|
func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
|
2016-03-31 15:18:39 -07:00
|
|
|
// make an array type corresponding the number of elements we have
|
2020-10-12 15:02:59 +02:00
|
|
|
t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
|
2015-02-13 14:40:36 -05:00
|
|
|
dowidth(t)
|
|
|
|
|
|
2016-06-19 14:12:59 -07:00
|
|
|
if ctxt == inNonInitFunction {
|
2015-02-13 14:40:36 -05:00
|
|
|
// put everything into static array
|
2016-06-20 08:18:22 -07:00
|
|
|
vstat := staticname(t)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-06-19 14:12:59 -07:00
|
|
|
fixedlit(ctxt, initKindStatic, n, vstat, init)
|
|
|
|
|
fixedlit(ctxt, initKindDynamic, n, vstat, init)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
// copy static to slice
|
2018-11-18 08:34:38 -08:00
|
|
|
var_ = typecheck(var_, ctxExpr|ctxAssign)
|
2020-11-22 12:09:08 -05:00
|
|
|
nam := stataddr(var_)
|
|
|
|
|
if nam == nil || nam.Class() != PEXTERN {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("slicelit: %v", var_)
|
2017-02-15 21:16:49 -08:00
|
|
|
}
|
2020-11-22 12:09:08 -05:00
|
|
|
slicesym(nam, vstat, t.NumElem())
|
2015-02-13 14:40:36 -05:00
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// recipe for var = []t{...}
|
|
|
|
|
// 1. make a static array
|
|
|
|
|
// var vstat [...]t
|
|
|
|
|
// 2. assign (data statements) the constant part
|
|
|
|
|
// vstat = constpart{}
|
|
|
|
|
// 3. make an auto pointer to array and allocate heap to it
|
|
|
|
|
// var vauto *[...]t = new([...]t)
|
|
|
|
|
// 4. copy the static array to the auto array
|
|
|
|
|
// *vauto = vstat
|
2016-04-23 22:59:01 -07:00
|
|
|
// 5. for each dynamic part assign to the array
|
|
|
|
|
// vauto[i] = dynamic part
|
|
|
|
|
// 6. assign slice of allocated heap to var
|
|
|
|
|
// var = vauto[:]
|
2015-02-13 14:40:36 -05:00
|
|
|
//
|
|
|
|
|
// an optimization is done if there is no constant part
|
|
|
|
|
// 3. var vauto *[...]t = new([...]t)
|
2016-04-23 22:59:01 -07:00
|
|
|
// 5. vauto[i] = dynamic part
|
|
|
|
|
// 6. var = vauto[:]
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
// if the literal contains constants,
|
|
|
|
|
// make static initialized array (1),(2)
|
2015-03-02 14:22:05 -05:00
|
|
|
var vstat *Node
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-05-16 14:28:44 -07:00
|
|
|
mode := getdyn(n, true)
|
2019-09-28 23:30:08 +07:00
|
|
|
if mode&initConst != 0 && !isSmallSliceLit(n) {
|
2016-06-20 08:18:22 -07:00
|
|
|
if ctxt == inInitFunction {
|
2020-07-15 20:01:32 -04:00
|
|
|
vstat = readonlystaticname(t)
|
|
|
|
|
} else {
|
|
|
|
|
vstat = staticname(t)
|
2016-06-20 08:18:22 -07:00
|
|
|
}
|
2016-06-19 14:12:59 -07:00
|
|
|
fixedlit(ctxt, initKindStatic, n, vstat, init)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// make new auto *array (3 declare)
|
cmd/compile: factor out Pkg, Sym, and Type into package types
- created new package cmd/compile/internal/types
- moved Pkg, Sym, Type to new package
- to break cycles, for now we need the (ugly) types/utils.go
file which contains a handful of functions that must be installed
early by the gc frontend
- to break cycles, for now we need two functions to convert between
*gc.Node and *types.Node (the latter is a dummy type)
- adjusted the gc's code to use the new package and the conversion
functions as needed
- made several Pkg, Sym, and Type methods functions as needed
- renamed constructors typ, typPtr, typArray, etc. to types.New,
types.NewPtr, types.NewArray, etc.
Passes toolstash-check -all.
Change-Id: I8adfa5e85c731645d0a7fd2030375ed6ebf54b72
Reviewed-on: https://go-review.googlesource.com/39855
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2017-04-04 17:54:02 -07:00
|
|
|
vauto := temp(types.NewPtr(t))
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
// set auto to point at new temp or heap (3 assign)
|
2015-02-23 16:07:24 -05:00
|
|
|
var a *Node
|
2015-05-26 23:05:35 -04:00
|
|
|
if x := prealloc[n]; x != nil {
|
2015-10-22 09:51:12 +09:00
|
|
|
// temp allocated during order.go for dddarg
|
2018-10-18 15:24:50 -07:00
|
|
|
if !types.Identical(t, x.Type) {
|
2018-10-15 09:44:22 -07:00
|
|
|
panic("dotdotdot base type does not match order's assigned type")
|
|
|
|
|
}
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
if vstat == nil {
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, x, nil)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a) // zero new temp
|
2018-09-07 14:55:09 -07:00
|
|
|
} else {
|
|
|
|
|
// Declare that we're about to initialize all of x.
|
|
|
|
|
// (Which happens at the *vauto = vstat below.)
|
|
|
|
|
init.Append(nod(OVARDEF, x, nil))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OADDR, x, nil)
|
2015-02-13 14:40:36 -05:00
|
|
|
} else if n.Esc == EscNone {
|
|
|
|
|
a = temp(t)
|
|
|
|
|
if vstat == nil {
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, temp(t), nil)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a) // zero new temp
|
2015-02-13 14:40:36 -05:00
|
|
|
a = a.Left
|
2018-09-07 14:55:09 -07:00
|
|
|
} else {
|
|
|
|
|
init.Append(nod(OVARDEF, a, nil))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OADDR, a, nil)
|
2015-02-13 14:40:36 -05:00
|
|
|
} else {
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(ONEW, nil, nil)
|
2016-03-10 10:13:42 -08:00
|
|
|
a.List.Set1(typenod(t))
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, vauto, a)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
a = walkexpr(a, init)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
if vstat != nil {
|
|
|
|
|
// copy static to heap (4)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = nod(ODEREF, vauto, nil)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, a, vstat)
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
a = walkexpr(a, init)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-04-23 22:59:01 -07:00
|
|
|
// put dynamics into array (5)
|
2016-10-27 02:02:30 -07:00
|
|
|
var index int64
|
2017-10-11 10:14:31 +01:00
|
|
|
for _, value := range n.List.Slice() {
|
|
|
|
|
if value.Op == OKEY {
|
2018-11-28 14:34:45 -08:00
|
|
|
index = indexconst(value.Left)
|
|
|
|
|
if index < 0 {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("slicelit: invalid index %v", value.Left)
|
2018-11-28 14:34:45 -08:00
|
|
|
}
|
2017-10-11 10:14:31 +01:00
|
|
|
value = value.Right
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-10-27 02:02:30 -07:00
|
|
|
a := nod(OINDEX, vauto, nodintconst(index))
|
2017-02-27 19:56:38 +02:00
|
|
|
a.SetBounded(true)
|
2016-10-27 02:02:30 -07:00
|
|
|
index++
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
// TODO need to check bounds?
|
|
|
|
|
|
|
|
|
|
switch value.Op {
|
2016-06-19 07:20:28 -07:00
|
|
|
case OSLICELIT:
|
|
|
|
|
break
|
|
|
|
|
|
2016-06-19 12:11:47 -07:00
|
|
|
case OARRAYLIT, OSTRUCTLIT:
|
2019-05-13 13:43:49 -07:00
|
|
|
k := initKindDynamic
|
|
|
|
|
if vstat == nil {
|
|
|
|
|
// Generate both static and dynamic initializations.
|
|
|
|
|
// See issue #31987.
|
|
|
|
|
k = initKindLocalCode
|
|
|
|
|
}
|
|
|
|
|
fixedlit(ctxt, k, value, a, init)
|
2015-02-13 14:40:36 -05:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-27 17:08:57 +07:00
|
|
|
if vstat != nil && value.isGoConst() { // already set by copy from static value
|
2015-02-13 14:40:36 -05:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-23 22:59:01 -07:00
|
|
|
// build list of vauto[c] = expr
|
2015-06-29 16:30:19 -04:00
|
|
|
setlineno(value)
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, a, value)
|
2015-02-13 14:40:36 -05:00
|
|
|
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
2018-10-05 08:54:50 -07:00
|
|
|
a = orderStmtInPlace(a, map[string][]*Node{})
|
cmd/compile: reduce use of **Node parameters
Escape analysis has a hard time with tree-like
structures (see #13493 and #14858).
This is unlikely to change.
As a result, when invoking a function that accepts
a **Node parameter, we usually allocate a *Node
on the heap. This happens a whole lot.
This CL changes functions from taking a **Node
to acting more like append: It both modifies
the input and returns a replacement for it.
Because of the cascading nature of escape analysis,
in order to get the benefits, I had to modify
almost all such functions. The remaining functions
are in racewalk and the backend. I would be happy
to update them as well in a separate CL.
This CL was created by manually updating the
function signatures and the directly impacted
bits of code. The callsites were then automatically
updated using a bespoke script:
https://gist.github.com/josharian/046b1be7aceae244de39
For ease of reviewing and future understanding,
this CL is also broken down into four CLs,
mailed separately, which show the manual
and the automated changes separately.
They are CLs 20990, 20991, 20992, and 20993.
Passes toolstash -cmp.
name old time/op new time/op delta
Template 335ms ± 5% 324ms ± 5% -3.35% (p=0.000 n=23+24)
Unicode 176ms ± 9% 165ms ± 6% -6.12% (p=0.000 n=23+24)
GoTypes 1.10s ± 4% 1.07s ± 2% -2.77% (p=0.000 n=24+24)
Compiler 5.31s ± 3% 5.15s ± 3% -2.95% (p=0.000 n=24+24)
MakeBash 41.6s ± 1% 41.7s ± 2% ~ (p=0.586 n=23+23)
name old alloc/op new alloc/op delta
Template 63.3MB ± 0% 62.4MB ± 0% -1.36% (p=0.000 n=25+23)
Unicode 42.4MB ± 0% 41.6MB ± 0% -1.99% (p=0.000 n=24+25)
GoTypes 220MB ± 0% 217MB ± 0% -1.11% (p=0.000 n=25+25)
Compiler 994MB ± 0% 973MB ± 0% -2.08% (p=0.000 n=24+25)
name old allocs/op new allocs/op delta
Template 681k ± 0% 574k ± 0% -15.71% (p=0.000 n=24+25)
Unicode 518k ± 0% 413k ± 0% -20.34% (p=0.000 n=25+24)
GoTypes 2.08M ± 0% 1.78M ± 0% -14.62% (p=0.000 n=25+25)
Compiler 9.26M ± 0% 7.64M ± 0% -17.48% (p=0.000 n=25+25)
name old text-bytes new text-bytes delta
HelloSize 578k ± 0% 578k ± 0% ~ (all samples are equal)
CmdGoSize 6.46M ± 0% 6.46M ± 0% ~ (all samples are equal)
name old data-bytes new data-bytes delta
HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal)
CmdGoSize 281k ± 0% 281k ± 0% ~ (all samples are equal)
name old exe-bytes new exe-bytes delta
HelloSize 921k ± 0% 921k ± 0% ~ (all samples are equal)
CmdGoSize 9.86M ± 0% 9.86M ± 0% ~ (all samples are equal)
Change-Id: I277d95bd56d51c166ef7f560647aeaa092f3f475
Reviewed-on: https://go-review.googlesource.com/20959
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2016-03-20 08:03:31 -07:00
|
|
|
a = walkstmt(a)
|
2016-03-07 22:54:46 -08:00
|
|
|
init.Append(a)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
2016-04-23 22:59:01 -07:00
|
|
|
|
|
|
|
|
// make slice out of heap (6)
|
2016-09-16 11:00:54 +10:00
|
|
|
a = nod(OAS, var_, nod(OSLICE, vauto, nil))
|
2016-04-23 22:59:01 -07:00
|
|
|
|
2018-11-18 08:34:38 -08:00
|
|
|
a = typecheck(a, ctxStmt)
|
2018-10-05 08:54:50 -07:00
|
|
|
a = orderStmtInPlace(a, map[string][]*Node{})
|
2016-04-23 22:59:01 -07:00
|
|
|
a = walkstmt(a)
|
|
|
|
|
init.Append(a)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-09-04 09:34:03 -07:00
|
|
|
// maplit generates initialization code for the map composite literal n,
// storing the result in the map variable m. All generated statements are
// appended to init. The order pass has already removed any runtime-computed
// entries, so every remaining key/value pair must be a static literal
// (verified below with base.Fatalf).
func maplit(n *Node, m *Node, init *Nodes) {
	// make the map var: m = make(T, len(entries))
	a := nod(OMAKE, nil, nil)
	a.Esc = n.Esc
	a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
	litas(m, a, init)

	entries := n.List.Slice()

	// The order pass already removed any dynamic (runtime-computed) entries.
	// All remaining entries are static. Double-check that.
	for _, r := range entries {
		if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
			base.Fatalf("maplit: entry is not a literal: %v", r)
		}
	}

	if len(entries) > 25 {
		// For a large number of entries, put them in an array and loop.

		// build types [count]Tindex and [count]Tvalue
		tk := types.NewArray(n.Type.Key(), int64(len(entries)))
		te := types.NewArray(n.Type.Elem(), int64(len(entries)))

		// The static arrays are never compared, so they do not need
		// algs (hash/equality) generated for them.
		tk.SetNoalg(true)
		te.SetNoalg(true)

		dowidth(tk)
		dowidth(te)

		// make and initialize static arrays
		vstatk := readonlystaticname(tk)
		vstate := readonlystaticname(te)

		datak := nod(OARRAYLIT, nil, nil)
		datae := nod(OARRAYLIT, nil, nil)
		for _, r := range entries {
			datak.List.Append(r.Left)
			datae.List.Append(r.Right)
		}
		fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
		fixedlit(inInitFunction, initKindStatic, datae, vstate, init)

		// loop adding structure elements to map
		// for i = 0; i < len(vstatk); i++ {
		//	map[vstatk[i]] = vstate[i]
		// }
		i := temp(types.Types[TINT])
		rhs := nod(OINDEX, vstate, i)
		// Indexing is by the loop counter bounded by the array length,
		// so bounds checks can be elided.
		rhs.SetBounded(true)

		kidx := nod(OINDEX, vstatk, i)
		kidx.SetBounded(true)
		lhs := nod(OINDEX, m, kidx)

		zero := nod(OAS, i, nodintconst(0))
		cond := nod(OLT, i, nodintconst(tk.NumElem()))
		incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
		body := nod(OAS, lhs, rhs)

		loop := nod(OFOR, cond, incr)
		loop.Nbody.Set1(body)
		loop.Ninit.Set1(zero)

		loop = typecheck(loop, ctxStmt)
		loop = walkstmt(loop)
		init.Append(loop)
		return
	}
	// For a small number of entries, just add them directly.

	// Build list of var[c] = expr.
	// Use temporaries so that mapassign1 can have addressable key, elem.
	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
	tmpkey := temp(m.Type.Key())
	tmpelem := temp(m.Type.Elem())

	for _, r := range entries {
		index, elem := r.Left, r.Right

		// tmpkey = index
		setlineno(index)
		a := nod(OAS, tmpkey, index)
		a = typecheck(a, ctxStmt)
		a = walkstmt(a)
		init.Append(a)

		// tmpelem = elem
		setlineno(elem)
		a = nod(OAS, tmpelem, elem)
		a = typecheck(a, ctxStmt)
		a = walkstmt(a)
		init.Append(a)

		// m[tmpkey] = tmpelem
		setlineno(tmpelem)
		a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
		a = typecheck(a, ctxStmt)
		a = walkstmt(a)
		init.Append(a)
	}

	// Mark the temporaries dead so their stack slots can be reused.
	a = nod(OVARKILL, tmpkey, nil)
	a = typecheck(a, ctxStmt)
	init.Append(a)
	a = nod(OVARKILL, tmpelem, nil)
	a = typecheck(a, ctxStmt)
	init.Append(a)
}
|
|
|
|
|
|
2016-09-04 09:34:03 -07:00
|
|
|
// anylit generates initialization code for the composite literal (or name)
// n, storing the result into var_. Generated statements are appended to
// init. It dispatches on n.Op: names are assigned directly; pointer
// literals allocate (or reuse a stack temporary as) backing store and
// recurse; struct/array literals are split into static and dynamic parts
// via fixedlit; slice and map literals delegate to slicelit and maplit.
func anylit(n *Node, var_ *Node, init *Nodes) {
	t := n.Type
	switch n.Op {
	default:
		base.Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)

	case ONAME, OMETHEXPR:
		// Plain assignment: var_ = n.
		a := nod(OAS, var_, n)
		a = typecheck(a, ctxStmt)
		init.Append(a)

	case OPTRLIT:
		if !t.IsPtr() {
			base.Fatalf("anylit: not ptr")
		}

		var r *Node
		if n.Right != nil {
			// n.Right is stack temporary used as backing store.
			init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
			r = nod(OADDR, n.Right, nil)
			r = typecheck(r, ctxExpr)
		} else {
			// No backing store: heap-allocate with new(T).
			r = nod(ONEW, nil, nil)
			r.SetTypecheck(1)
			r.Type = t
			r.Esc = n.Esc
		}

		r = walkexpr(r, init)
		a := nod(OAS, var_, r)

		a = typecheck(a, ctxStmt)
		init.Append(a)

		// Recurse to initialize the pointed-to value: *var_ = n.Left.
		var_ = nod(ODEREF, var_, nil)
		var_ = typecheck(var_, ctxExpr|ctxAssign)
		anylit(n.Left, var_, init)

	case OSTRUCTLIT, OARRAYLIT:
		if !t.IsStruct() && !t.IsArray() {
			base.Fatalf("anylit: not struct/array")
		}

		if var_.isSimpleName() && n.List.Len() > 4 {
			// lay out static data
			vstat := readonlystaticname(t)

			ctxt := inInitFunction
			if n.Op == OARRAYLIT {
				ctxt = inNonInitFunction
			}
			fixedlit(ctxt, initKindStatic, n, vstat, init)

			// copy static to var
			a := nod(OAS, var_, vstat)

			a = typecheck(a, ctxStmt)
			a = walkexpr(a, init)
			init.Append(a)

			// add expressions to automatic
			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
			break
		}

		var components int64
		if n.Op == OARRAYLIT {
			components = t.NumElem()
		} else {
			components = int64(t.NumFields())
		}
		// initialization of an array or struct with unspecified components (missing fields or arrays)
		if var_.isSimpleName() || int64(n.List.Len()) < components {
			// Zero var_ first so unmentioned components are cleared.
			a := nod(OAS, var_, nil)
			a = typecheck(a, ctxStmt)
			a = walkexpr(a, init)
			init.Append(a)
		}

		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)

	case OSLICELIT:
		slicelit(inInitFunction, n, var_, init)

	case OMAPLIT:
		if !t.IsMap() {
			base.Fatalf("anylit: not map")
		}
		maplit(n, var_, init)
	}
}
|
|
|
|
|
|
2016-03-07 22:54:46 -08:00
|
|
|
// oaslit checks whether the assignment n is a special composite literal
// assignment (a simple name assigned a struct/array/slice/map literal of
// identical type, with no self-reference between left and right sides).
// If so, it emits the initialization via anylit into init, rewrites n to
// OEMPTY, and reports true; otherwise it leaves n alone and reports false.
func oaslit(n *Node, init *Nodes) bool {
	if n.Left == nil || n.Right == nil {
		// not a special composite literal assignment
		return false
	}
	if n.Left.Type == nil || n.Right.Type == nil {
		// not a special composite literal assignment
		return false
	}
	if !n.Left.isSimpleName() {
		// not a special composite literal assignment
		return false
	}
	if !types.Identical(n.Left.Type, n.Right.Type) {
		// not a special composite literal assignment
		return false
	}

	switch n.Right.Op {
	default:
		// not a special composite literal assignment
		return false

	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
		// If the destination appears inside the literal (vmatch1),
		// the fast path is unsafe; fall back to normal assignment.
		if vmatch1(n.Left, n.Right) {
			// not a special composite literal assignment
			return false
		}
		anylit(n.Right, n.Left, init)
	}

	// The initialization has been fully emitted into init;
	// neutralize the original assignment statement.
	n.Op = OEMPTY
	n.Right = nil
	return true
}
|
|
|
|
|
|
|
|
|
|
func getlit(lit *Node) int {
|
2016-09-15 14:34:20 +10:00
|
|
|
if smallintconst(lit) {
|
2020-10-12 15:02:59 +02:00
|
|
|
return int(lit.Int64Val())
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
return -1
|
|
|
|
|
}
|
|
|
|
|
|
2020-11-22 12:09:08 -05:00
|
|
|
// stataddr returns the static address of n, if n has one, or else nil.
|
|
|
|
|
func stataddr(n *Node) *Node {
|
2015-02-13 14:40:36 -05:00
|
|
|
if n == nil {
|
2020-11-22 12:09:08 -05:00
|
|
|
return nil
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch n.Op {
|
2020-11-18 11:25:29 -05:00
|
|
|
case ONAME, OMETHEXPR:
|
2020-11-22 12:09:08 -05:00
|
|
|
return n.sepcopy()
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
case ODOT:
|
2020-11-22 12:09:08 -05:00
|
|
|
nam := stataddr(n.Left)
|
|
|
|
|
if nam == nil {
|
2015-02-13 14:40:36 -05:00
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
nam.Xoffset += n.Xoffset
|
|
|
|
|
nam.Type = n.Type
|
2020-11-22 12:09:08 -05:00
|
|
|
return nam
|
2015-02-13 14:40:36 -05:00
|
|
|
|
|
|
|
|
case OINDEX:
|
2016-03-29 09:14:19 -07:00
|
|
|
if n.Left.Type.IsSlice() {
|
2015-02-13 14:40:36 -05:00
|
|
|
break
|
|
|
|
|
}
|
2020-11-22 12:09:08 -05:00
|
|
|
nam := stataddr(n.Left)
|
|
|
|
|
if nam == nil {
|
2015-02-13 14:40:36 -05:00
|
|
|
break
|
|
|
|
|
}
|
2015-02-23 16:07:24 -05:00
|
|
|
l := getlit(n.Right)
|
2015-02-13 14:40:36 -05:00
|
|
|
if l < 0 {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check for overflow.
|
2017-03-17 13:35:36 -07:00
|
|
|
if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
|
2015-02-13 14:40:36 -05:00
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
nam.Xoffset += int64(l) * n.Type.Width
|
|
|
|
|
nam.Type = n.Type
|
2020-11-22 12:09:08 -05:00
|
|
|
return nam
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2020-11-22 12:09:08 -05:00
|
|
|
return nil
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2019-03-25 12:35:42 -07:00
|
|
|
// initplan builds and caches in s.initplans the init plan for the
// composite literal n, recording each initializing expression together
// with the offset at which it is stored (maps use the marker offset -1).
// It is idempotent: a repeated call for the same n returns immediately.
func (s *InitSchedule) initplan(n *Node) {
	if s.initplans[n] != nil {
		return
	}
	p := new(InitPlan)
	s.initplans[n] = p
	switch n.Op {
	default:
		base.Fatalf("initplan")

	case OARRAYLIT, OSLICELIT:
		// k tracks the implicit element index; an explicit OKEY entry
		// resets it to the keyed index.
		var k int64
		for _, a := range n.List.Slice() {
			if a.Op == OKEY {
				k = indexconst(a.Left)
				if k < 0 {
					base.Fatalf("initplan arraylit: invalid index %v", a.Left)
				}
				a = a.Right
			}
			s.addvalue(p, k*n.Type.Elem().Width, a)
			k++
		}

	case OSTRUCTLIT:
		for _, a := range n.List.Slice() {
			if a.Op != OSTRUCTKEY {
				base.Fatalf("initplan structlit")
			}
			// Values assigned to the blank identifier carry no data.
			if a.Sym.IsBlank() {
				continue
			}
			s.addvalue(p, a.Xoffset, a.Left)
		}

	case OMAPLIT:
		for _, a := range n.List.Slice() {
			if a.Op != OKEY {
				base.Fatalf("initplan maplit")
			}
			// Map entries have no static offset; -1 marks them as such.
			s.addvalue(p, -1, a.Right)
		}
	}
}
|
|
|
|
|
|
2019-03-25 12:35:42 -07:00
|
|
|
func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
|
2015-02-13 14:40:36 -05:00
|
|
|
// special case: zero can be dropped entirely
|
2018-04-08 13:39:10 +01:00
|
|
|
if isZero(n) {
|
2015-02-13 14:40:36 -05:00
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// special case: inline struct and array (not slice) literals
|
2015-02-17 22:13:49 -05:00
|
|
|
if isvaluelit(n) {
|
2019-03-25 12:35:42 -07:00
|
|
|
s.initplan(n)
|
|
|
|
|
q := s.initplans[n]
|
2015-09-08 22:22:44 +02:00
|
|
|
for _, qe := range q.E {
|
2016-03-13 17:48:17 -07:00
|
|
|
// qe is a copy; we are not modifying entries in q.E
|
|
|
|
|
qe.Xoffset += xoffset
|
|
|
|
|
p.E = append(p.E, qe)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// add to plan
|
2016-03-13 17:48:17 -07:00
|
|
|
p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2018-04-08 13:39:10 +01:00
|
|
|
func isZero(n *Node) bool {
|
2015-02-13 14:40:36 -05:00
|
|
|
switch n.Op {
|
2020-11-13 20:38:21 -08:00
|
|
|
case ONIL:
|
|
|
|
|
return true
|
|
|
|
|
|
2015-02-13 14:40:36 -05:00
|
|
|
case OLITERAL:
|
2020-11-13 23:36:48 -08:00
|
|
|
switch u := n.Val(); u.Kind() {
|
|
|
|
|
case constant.String:
|
|
|
|
|
return constant.StringVal(u) == ""
|
|
|
|
|
case constant.Bool:
|
|
|
|
|
return !constant.BoolVal(u)
|
2015-02-13 14:40:36 -05:00
|
|
|
default:
|
2020-11-13 23:36:48 -08:00
|
|
|
return constant.Sign(u) == 0
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2016-10-12 15:48:18 -07:00
|
|
|
case OARRAYLIT:
|
2016-03-08 15:10:26 -08:00
|
|
|
for _, n1 := range n.List.Slice() {
|
2016-10-27 02:02:30 -07:00
|
|
|
if n1.Op == OKEY {
|
|
|
|
|
n1 = n1.Right
|
|
|
|
|
}
|
2018-04-08 13:39:10 +01:00
|
|
|
if !isZero(n1) {
|
2015-02-17 22:13:49 -05:00
|
|
|
return false
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
}
|
2015-02-17 22:13:49 -05:00
|
|
|
return true
|
2016-10-12 15:48:18 -07:00
|
|
|
|
|
|
|
|
case OSTRUCTLIT:
|
|
|
|
|
for _, n1 := range n.List.Slice() {
|
2018-04-08 13:39:10 +01:00
|
|
|
if !isZero(n1.Left) {
|
2016-10-12 15:48:18 -07:00
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2015-02-17 22:13:49 -05:00
|
|
|
return false
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2015-02-17 22:13:49 -05:00
|
|
|
func isvaluelit(n *Node) bool {
|
2016-06-19 07:20:28 -07:00
|
|
|
return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
2017-02-15 21:16:49 -08:00
|
|
|
func genAsStatic(as *Node) {
|
2017-03-09 22:41:32 -08:00
|
|
|
if as.Left.Type == nil {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("genAsStatic as.Left not typechecked")
|
2017-03-09 22:41:32 -08:00
|
|
|
}
|
|
|
|
|
|
2020-11-22 12:09:08 -05:00
|
|
|
nam := stataddr(as.Left)
|
|
|
|
|
if nam == nil || (nam.Class() != PEXTERN && as.Left != nblank) {
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("genAsStatic: lhs %v", as.Left)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
|
cmd/compile: avoid giant init functions due to many user inits
We generate code that calls each user init function one at a time.
When there are lots of user init functions,
usually due to generated code, like test/rotate* or
github.com/juju/govmomi/vim25/types,
we can end up with a giant function,
which can be slow to compile.
This CL puts in an escape valve.
When there are more than 500 functions, instead of doing:
init.0()
init.1()
// ...
we construct a static array of functions:
var fns = [...]func(){init.0, init.1, ... }
and call them in a loop.
This generates marginally bigger, marginally worse code,
so we restrict it to cases in which it might start to matter.
500 was selected as a mostly arbitrary threshold for "lots".
Each call uses two Progs, one for PCDATA and one for the call,
so at 500 calls we use ~1000 Progs.
At concurrency==8, we get a Prog cache of about
1000 Progs per worker.
So a threshold of 500 should more or less avoid
exhausting the Prog cache in most cases.
Change-Id: I276b887173ddbf65b2164ec9f9b5eb04d8c753c2
Reviewed-on: https://go-review.googlesource.com/41500
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-23 17:31:15 -07:00
|
|
|
switch {
|
|
|
|
|
case as.Right.Op == OLITERAL:
|
2020-11-22 12:09:08 -05:00
|
|
|
litsym(nam, as.Right, int(as.Right.Type.Width))
|
2020-11-18 11:25:29 -05:00
|
|
|
case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC:
|
2020-11-22 12:09:08 -05:00
|
|
|
pfuncsym(nam, as.Right)
|
cmd/compile: avoid giant init functions due to many user inits
We generate code that calls each user init function one at a time.
When there are lots of user init functions,
usually due to generated code, like test/rotate* or
github.com/juju/govmomi/vim25/types,
we can end up with a giant function,
which can be slow to compile.
This CL puts in an escape valve.
When there are more than 500 functions, instead of doing:
init.0()
init.1()
// ...
we construct a static array of functions:
var fns = [...]func(){init.0, init.1, ... }
and call them in a loop.
This generates marginally bigger, marginally worse code,
so we restrict it to cases in which it might start to matter.
500 was selected as a mostly arbitrary threshold for "lots".
Each call uses two Progs, one for PCDATA and one for the call,
so at 500 calls we use ~1000 Progs.
At concurrency==8, we get a Prog cache of about
1000 Progs per worker.
So a threshold of 500 should more or less avoid
exhausting the Prog cache in most cases.
Change-Id: I276b887173ddbf65b2164ec9f9b5eb04d8c753c2
Reviewed-on: https://go-review.googlesource.com/41500
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-23 17:31:15 -07:00
|
|
|
default:
|
2020-11-19 20:49:23 -05:00
|
|
|
base.Fatalf("genAsStatic: rhs %v", as.Right)
|
2015-02-13 14:40:36 -05:00
|
|
|
}
|
|
|
|
|
}
|