// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCache *ssa.Cache
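
// Editor's note (not part of the original source): ssaConfig is built once per
// compiler process by initssaconfig below and shared by every function that is
// compiled, while ssaCache holds reusable scratch state; buildssa attaches the
// cache to each new ssa.Func and calls Cache.Reset before reusing it.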

func initssaconfig() {
	types := ssa.Types{
		Bool:       Types[TBOOL],
		Int8:       Types[TINT8],
		Int16:      Types[TINT16],
		Int32:      Types[TINT32],
		Int64:      Types[TINT64],
		UInt8:      Types[TUINT8],
		UInt16:     Types[TUINT16],
		UInt32:     Types[TUINT32],
		UInt64:     Types[TUINT64],
		Float32:    Types[TFLOAT32],
		Float64:    Types[TFLOAT64],
		Int:        Types[TINT],
		Uintptr:    Types[TUINTPTR],
		String:     Types[TSTRING],
		BytePtr:    typPtr(Types[TUINT8]),
		Int32Ptr:   typPtr(Types[TINT32]),
		UInt32Ptr:  typPtr(Types[TUINT32]),
		IntPtr:     typPtr(Types[TINT]),
		UintptrPtr: typPtr(Types[TUINTPTR]),
		Float32Ptr: typPtr(Types[TFLOAT32]),
		Float64Ptr: typPtr(Types[TFLOAT64]),
		BytePtrPtr: typPtr(typPtr(Types[TUINT8])),
	}
	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = typPtr(Types[TINTER])                 // *interface{}
	_ = typPtr(typPtr(Types[TSTRING]))        // **string
	_ = typPtr(typPtr(idealstring))           // **string
	_ = typPtr(typSlice(Types[TINTER]))       // *[]interface{}
	_ = typPtr(typPtr(bytetype))              // **byte
	_ = typPtr(typSlice(bytetype))            // *[]byte
	_ = typPtr(typSlice(Types[TSTRING]))      // *[]string
	_ = typPtr(typSlice(idealstring))         // *[]string
	_ = typPtr(typPtr(typPtr(Types[TUINT8]))) // ***uint8
	_ = typPtr(Types[TINT16])                 // *int16
	_ = typPtr(Types[TINT64])                 // *int64
	_ = typPtr(errortype)                     // *error
	typPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaCache = new(ssa.Cache)
}

// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}
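
	// Editor's note (not part of the original source): GOSSAFUNC is normally
	// set on the command line, e.g.
	//
	//	GOSSAFUNC=Foo go build ./somepkg
	//
	// which makes the compiler print the SSA phases for the function named Foo
	// and, further down in this function, install an HTML writer that dumps
	// them to ssa.html. The package path and function name above are
	// placeholders.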

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = ssaCache
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.f.NoWB = true
	}
	defer func() {
		if s.f.WBPos.IsKnown() {
			fn.Func.WBPos = s.f.WBPos
		}
	}()
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, typPtr(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	if nerrors > 0 {
		return nil
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	if nerrors > 0 {
		return nil
	}

	return s.f
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// A dummy value used during phi construction.
	placeholder *ssa.Value

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
}
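
// Editor's note (not part of the original source): vars, defvars, and fwdVars
// together drive SSA variable numbering. Writes within a block update s.vars;
// endBlock snapshots that map into s.defvars[b.ID]; a read of a variable that
// has no definition yet in the current block is recorded as a FwdRef value in
// s.fwdVars. s.insertPhis (called from buildssa above) later resolves those
// forward references into phi values using the per-block defvars maps.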

type funcLine struct {
	f    *obj.LSym
	line src.XPos
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
)
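
// Editor's note (not part of the original source): these dummy Nodes are never
// declared in any function; they exist only as stable map keys in s.vars so
// that pseudo-variables can be tracked like ordinary ones. For example,
// buildssa above records the initial memory state with
//
//	s.vars[&memVar] = s.startmem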

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}
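
// Editor's note (not part of the original source): a common pattern elsewhere
// in this file is to finish the current block and wire it to its successor
// before moving on, roughly:
//
//	b := s.endBlock()
//	b.AddEdgeTo(next)
//	s.startBlock(next)
//
// where next is an *ssa.Block created with s.f.NewBlock and AddEdgeTo is the
// ssa.Block method that records the control-flow edge.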

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit nodes with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}
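
// Editor's note (not part of the original source): pushLine and popLine are
// almost always used as a pair bracketing the conversion of one node, as in
// stmt below:
//
//	s.pushLine(n.Pos)
//	defer s.popLine()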

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekPos(), msg, args...)
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t ssa.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
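
// Editor's note (not part of the original source): these wrappers exist so
// callers do not have to thread the current source position through every
// call. For example (illustrative only), adding two previously built int64
// values x and y in the current block could be written as:
//
//	sum := s.newValue2(ssa.OpAdd64, Types[TINT64], x, y)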

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekPos(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
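
// Editor's note (not part of the original source): the entryNewValue* variants
// place values in the entry block rather than the current block; buildssa uses
// them for values that logically exist on function entry, such as the initial
// memory, SP, and SB values built with entryNewValue0 and the parameter
// addresses built with entryNewValue1A above.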

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekPos(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekPos(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
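
// Editor's note (not part of the original source): constInt above picks the
// 32-bit or 64-bit constant op based on the target's int size, so callers can
// write, e.g. (illustrative):
//
//	zero := s.constInt(Types[TINT], 0)
//
// without caring about the word size of the architecture being compiled for.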

// stmtList converts the statement list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}
|
|
|
|
|
2015-04-15 15:51:25 -07:00
|
|
|
s.stmtList(n.Ninit)
|
|
|
|
switch n.Op {
|
|
|
|
|
|
|
|
case OBLOCK:
|
|
|
|
s.stmtList(n.List)
|
|
|
|
|
2015-07-16 13:25:36 -06:00
|
|
|
// No-ops
|
2015-08-28 21:19:40 -05:00
|
|
|
case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
|
2015-07-10 10:47:28 -06:00
|
|
|
|
2015-07-16 13:25:36 -06:00
|
|
|
// Expression statements
|
2016-08-23 16:49:28 -07:00
|
|
|
case OCALLFUNC:
|
|
|
|
if isIntrinsicCall(n) {
|
|
|
|
s.intrinsicCall(n)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
fallthrough
|
|
|
|
|
|
|
|
case OCALLMETH, OCALLINTER:
|
2015-09-09 23:56:59 -07:00
|
|
|
s.call(n, callNormal)
|
2016-09-11 08:29:04 -07:00
|
|
|
if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
|
|
|
|
if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
|
2017-03-01 15:50:57 -08:00
|
|
|
n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
|
2016-09-11 08:29:04 -07:00
|
|
|
m := s.mem()
|
|
|
|
b := s.endBlock()
|
|
|
|
b.Kind = ssa.BlockExit
|
|
|
|
b.SetControl(m)
|
|
|
|
// TODO: never rewrite OPANIC to OCALLFUNC in the
|
|
|
|
// first place. Need to wait until all backends
|
|
|
|
// go through SSA.
|
|
|
|
}
|
2016-02-06 22:35:34 -08:00
|
|
|
}
|
2015-09-09 23:56:59 -07:00
|
|
|
case ODEFER:
|
|
|
|
s.call(n.Left, callDefer)
|
|
|
|
case OPROC:
|
|
|
|
s.call(n.Left, callGo)
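	// OAS2DOTTYPE is the two-result form of a type assertion: v, ok = x.(T).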
	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

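	// OGOTO branches to the named label, creating the label's target block if it does not exist yet.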
	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

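	// OAS is a single assignment: Left = Right.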
	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			// VARDEF x
			// COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}
		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			// tmp = len(*p)
			// (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

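	// OIF: if Left { Nbody } else { Rlist }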
	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
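	// ORETJMP ends the function like ORETURN, but the exit block becomes a
	// BlockRetJmp that jumps to the function named by n.Left.Sym.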
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = Linksym(n.Left.Sym)

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

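	// OCHECKNIL emits an explicit nil check of n.Left.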
	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

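// opAndType pairs a syntax-tree Op with an operand EType; it is the key type of the opToSSA table below.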
type opAndType struct {
	op    Op
	etype EType
}

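// opToSSA maps a generic (Op, operand type) pair to the corresponding ssa.Op
// for that operand width (and signedness, where it matters).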
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}: ssa.OpAdd8,
	opAndType{OADD, TUINT8}: ssa.OpAdd8,
	opAndType{OADD, TINT16}: ssa.OpAdd16,
	opAndType{OADD, TUINT16}: ssa.OpAdd16,
	opAndType{OADD, TINT32}: ssa.OpAdd32,
	opAndType{OADD, TUINT32}: ssa.OpAdd32,
	opAndType{OADD, TPTR32}: ssa.OpAdd32,
	opAndType{OADD, TINT64}: ssa.OpAdd64,
	opAndType{OADD, TUINT64}: ssa.OpAdd64,
	opAndType{OADD, TPTR64}: ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}: ssa.OpSub8,
	opAndType{OSUB, TUINT8}: ssa.OpSub8,
	opAndType{OSUB, TINT16}: ssa.OpSub16,
	opAndType{OSUB, TUINT16}: ssa.OpSub16,
	opAndType{OSUB, TINT32}: ssa.OpSub32,
	opAndType{OSUB, TUINT32}: ssa.OpSub32,
	opAndType{OSUB, TINT64}: ssa.OpSub64,
	opAndType{OSUB, TUINT64}: ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}: ssa.OpCom8,
	opAndType{OCOM, TUINT8}: ssa.OpCom8,
	opAndType{OCOM, TINT16}: ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}: ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}: ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}: ssa.OpMul8,
	opAndType{OMUL, TUINT8}: ssa.OpMul8,
	opAndType{OMUL, TINT16}: ssa.OpMul16,
	opAndType{OMUL, TUINT16}: ssa.OpMul16,
	opAndType{OMUL, TINT32}: ssa.OpMul32,
	opAndType{OMUL, TUINT32}: ssa.OpMul32,
	opAndType{OMUL, TINT64}: ssa.OpMul64,
	opAndType{OMUL, TUINT64}: ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}: ssa.OpDiv8,
	opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
	opAndType{ODIV, TINT16}: ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}: ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}: ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}: ssa.OpMod8,
	opAndType{OMOD, TUINT8}: ssa.OpMod8u,
	opAndType{OMOD, TINT16}: ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}: ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}: ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}: ssa.OpAnd8,
	opAndType{OAND, TUINT8}: ssa.OpAnd8,
	opAndType{OAND, TINT16}: ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}: ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}: ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}: ssa.OpOr8,
	opAndType{OOR, TUINT8}: ssa.OpOr8,
	opAndType{OOR, TINT16}: ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}: ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}: ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}: ssa.OpXor8,
	opAndType{OXOR, TUINT8}: ssa.OpXor8,
	opAndType{OXOR, TINT16}: ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}: ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}: ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}: ssa.OpEqB,
	opAndType{OEQ, TINT8}: ssa.OpEq8,
	opAndType{OEQ, TUINT8}: ssa.OpEq8,
	opAndType{OEQ, TINT16}: ssa.OpEq16,
	opAndType{OEQ, TUINT16}: ssa.OpEq16,
	opAndType{OEQ, TINT32}: ssa.OpEq32,
	opAndType{OEQ, TUINT32}: ssa.OpEq32,
	opAndType{OEQ, TINT64}: ssa.OpEq64,
	opAndType{OEQ, TUINT64}: ssa.OpEq64,
	opAndType{OEQ, TINTER}: ssa.OpEqInter,
	opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
	opAndType{OEQ, TMAP}: ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,

	opAndType{ONE, TBOOL}: ssa.OpNeqB,
	opAndType{ONE, TINT8}: ssa.OpNeq8,
	opAndType{ONE, TUINT8}: ssa.OpNeq8,
	opAndType{ONE, TINT16}: ssa.OpNeq16,
	opAndType{ONE, TUINT16}: ssa.OpNeq16,
	opAndType{ONE, TINT32}: ssa.OpNeq32,
	opAndType{ONE, TUINT32}: ssa.OpNeq32,
	opAndType{ONE, TINT64}: ssa.OpNeq64,
	opAndType{ONE, TUINT64}: ssa.OpNeq64,
	opAndType{ONE, TINTER}: ssa.OpNeqInter,
	opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
	opAndType{ONE, TMAP}: ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,

	opAndType{OLT, TINT8}: ssa.OpLess8,
	opAndType{OLT, TUINT8}: ssa.OpLess8U,
	opAndType{OLT, TINT16}: ssa.OpLess16,
	opAndType{OLT, TUINT16}: ssa.OpLess16U,
	opAndType{OLT, TINT32}: ssa.OpLess32,
	opAndType{OLT, TUINT32}: ssa.OpLess32U,
	opAndType{OLT, TINT64}: ssa.OpLess64,
	opAndType{OLT, TUINT64}: ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}: ssa.OpGreater8,
	opAndType{OGT, TUINT8}: ssa.OpGreater8U,
	opAndType{OGT, TINT16}: ssa.OpGreater16,
	opAndType{OGT, TUINT16}: ssa.OpGreater16U,
	opAndType{OGT, TINT32}: ssa.OpGreater32,
	opAndType{OGT, TUINT32}: ssa.OpGreater32U,
	opAndType{OGT, TINT64}: ssa.OpGreater64,
	opAndType{OGT, TUINT64}: ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}: ssa.OpLeq8,
	opAndType{OLE, TUINT8}: ssa.OpLeq8U,
	opAndType{OLE, TINT16}: ssa.OpLeq16,
	opAndType{OLE, TUINT16}: ssa.OpLeq16U,
	opAndType{OLE, TINT32}: ssa.OpLeq32,
	opAndType{OLE, TUINT32}: ssa.OpLeq32U,
	opAndType{OLE, TINT64}: ssa.OpLeq64,
	opAndType{OLE, TUINT64}: ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}: ssa.OpGeq8,
	opAndType{OGE, TUINT8}: ssa.OpGeq8U,
	opAndType{OGE, TINT16}: ssa.OpGeq16,
	opAndType{OGE, TUINT16}: ssa.OpGeq16U,
	opAndType{OGE, TINT32}: ssa.OpGeq32,
	opAndType{OGE, TUINT32}: ssa.OpGeq32U,
	opAndType{OGE, TINT64}: ssa.OpGeq64,
	opAndType{OGE, TUINT64}: ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

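// concreteEtype maps the platform-dependent types TINT, TUINT, and TUINTPTR
// to their fixed-width equivalents, based on the target's IntSize and PtrSize.
// All other types are returned unchanged.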
func (s *state) concreteEtype(t *Type) EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
For integer types less than a machine register, we have to decide
what the invariants are for the high bits of the register. We used
to set the high bits to the correct extension (sign or zero, as
determined by the type) of the low bits.
This CL makes the compiler ignore the high bits of the register
altogether (they are junk).
On this plus side, this means ops that generate subword results don't
have to worry about correctly extending them. On the minus side,
ops that consume subword arguments have to deal with the input
registers not being correctly extended.
For x86, this tradeoff is probably worth it. Almost all opcodes
have versions that use only the correct subword piece of their
inputs. (The one big exception is array indexing.) Not many opcodes
can correctly sign extend on output.
For other architectures, the tradeoff is probably not so clear, as
they don't have many subword-safe opcodes (e.g. 16-bit compare,
ignoring the high 16/48 bits). Fortunately we can decide whether
we do this per-architecture.
For the machine-independent opcodes, we pretend that the "register"
size is equal to the type width, so sign extension is immaterial.
Opcodes that care about the signedness of the input (e.g. compare,
right shift) have two different variants.
Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
Reviewed-on: https://go-review.googlesource.com/12600
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
|
|
|
return TUINT64
|
2015-07-19 15:48:20 -07:00
|
|
|
}
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
For integer types less than a machine register, we have to decide
what the invariants are for the high bits of the register. We used
to set the high bits to the correct extension (sign or zero, as
determined by the type) of the low bits.
This CL makes the compiler ignore the high bits of the register
altogether (they are junk).
On this plus side, this means ops that generate subword results don't
have to worry about correctly extending them. On the minus side,
ops that consume subword arguments have to deal with the input
registers not being correctly extended.
For x86, this tradeoff is probably worth it. Almost all opcodes
have versions that use only the correct subword piece of their
inputs. (The one big exception is array indexing.) Not many opcodes
can correctly sign extend on output.
For other architectures, the tradeoff is probably not so clear, as
they don't have many subword-safe opcodes (e.g. 16-bit compare,
ignoring the high 16/48 bits). Fortunately we can decide whether
we do this per-architecture.
For the machine-independent opcodes, we pretend that the "register"
size is equal to the type width, so sign extension is immaterial.
Opcodes that care about the signedness of the input (e.g. compare,
right shift) have two different variants.
Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
Reviewed-on: https://go-review.googlesource.com/12600
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
|
|
|
return TUINT32
|
2015-07-19 15:48:20 -07:00
|
|
|
}
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
For integer types less than a machine register, we have to decide
what the invariants are for the high bits of the register. We used
to set the high bits to the correct extension (sign or zero, as
determined by the type) of the low bits.
This CL makes the compiler ignore the high bits of the register
altogether (they are junk).
On this plus side, this means ops that generate subword results don't
have to worry about correctly extending them. On the minus side,
ops that consume subword arguments have to deal with the input
registers not being correctly extended.
For x86, this tradeoff is probably worth it. Almost all opcodes
have versions that use only the correct subword piece of their
inputs. (The one big exception is array indexing.) Not many opcodes
can correctly sign extend on output.
For other architectures, the tradeoff is probably not so clear, as
they don't have many subword-safe opcodes (e.g. 16-bit compare,
ignoring the high 16/48 bits). Fortunately we can decide whether
we do this per-architecture.
For the machine-independent opcodes, we pretend that the "register"
size is equal to the type width, so sign extension is immaterial.
Opcodes that care about the signedness of the input (e.g. compare,
right shift) have two different variants.
Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
Reviewed-on: https://go-review.googlesource.com/12600
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
|
|
|
}
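// ssaOp returns the machine-independent SSA opcode to use
// for op applied to values of type t.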
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}
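// floatForComplex returns the float type used for the real and
// imaginary parts of the complex type t.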
func floatForComplex(t *Type) *Type {
	if t.Size() == 8 {
		return Types[TFLOAT32]
	} else {
		return Types[TFLOAT64]
	}
}
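// opAndTwoTypes is a key into shiftOpToSSA: an operator together with
// the types of its two operands (shifted value and shift count).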
type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}
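// twoTypes records the source and destination types of a conversion.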
type twoTypes struct {
	etype1 EType
	etype2 EType
}
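// twoOpsAndType describes a conversion performed as two SSA ops
// (op1, then op2) through an intermediate type.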
type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}
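// fpConvOpToSSA maps a (from, to) type pair to the pair of SSA ops
// used to convert to or from a floating-point type.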
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// this map is used only for 32-bit arch, and only includes the difference
// on 32-bit arch, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
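// shiftOpToSSA maps a shift operator and the types of its two operands
// (shifted value and shift count) to the corresponding SSA opcode.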
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
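// ssaShiftOp returns the SSA opcode for a shift op whose left operand
// has type t and whose shift count has type u.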
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: Linksym(n.Left.Sym)})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := Linksym(funcsym(n.Sym))
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: sym})
			return s.entryNewValue1A(ssa.OpAddr, typPtr(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v
	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}
		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.IntSize == 4 && thearch.LinkArch.Name != "amd64p32" && thearch.LinkArch.Family != sys.MIPS {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Name == "arm64" {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if thearch.LinkArch.Family == sys.MIPS {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
			}
			// ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
			return nil
		}
		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				switch ft.Size() {
				case 8:
					op = ssa.OpRound32F
				case 16:
					op = ssa.OpRound64F
				default:
					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
				}
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
		return nil
	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %v", n.Op)
			}
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case ODIV:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValue2(divop, wt, xreal, denom)
			ximag = s.newValue2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}
			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		if n.Type.IsFloat() {
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
		return s.intDivide(n, a, b)
	case OMOD:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.intDivide(n, a, b)
	case OADD, OSUB:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			pt := floatForComplex(n.Type)
			op := s.ssaOp(n.Op, pt)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OAND, OOR, OXOR:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OLSH, ORSH:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
	case OANDAND, OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		//     A && B
		// to
		//     var = A
		//     if var {
		//         var = B
		//     }
		// Using var in the subsequent block introduces the
		// necessary phi variable.
		el := s.expr(n.Left)
		s.vars[n] = el

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(el)
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finicky, so don't bother for now.

		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op == OANDAND {
			b.AddEdgeTo(bRight)
			b.AddEdgeTo(bResult)
		} else if n.Op == OOROR {
			b.AddEdgeTo(bResult)
			b.AddEdgeTo(bRight)
		}

		s.startBlock(bRight)
		er := s.expr(n.Right)
		s.vars[n] = er

		b = s.endBlock()
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, Types[TBOOL])
	case OCOMPLEX:
		r := s.expr(n.Left)
		i := s.expr(n.Right)
		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)

	// unary ops
	case OMINUS:
		a := s.expr(n.Left)
		if n.Type.IsComplex() {
			tp := floatForComplex(n.Type)
			negop := s.ssaOp(n.Op, tp)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		}
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case ONOT, OCOM:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case OIMAG, OREAL:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
	case OPLUS:
		return s.expr(n.Left)
	case OADDR:
		return s.addr(n.Left, n.Bounded())

	case OINDREGSP:
		addr := s.constOffPtrSP(typPtr(n.Type), n.Xoffset)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())

	case OIND:
		p := s.exprPtr(n.Left, false, n.Pos)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOT:
		t := n.Left.Type
		if canSSAType(t) {
			v := s.expr(n.Left)
			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
		}
		if n.Left.Op == OSTRUCTLIT {
			// All literals with nonzero fields have already been
			// rewritten during walk. Any that remain are just T{}
			// or equivalents. Use the zero value.
			if !iszero(n.Left) {
				Fatalf("literal with nonzero value in SSA: %v", n.Left)
			}
			return s.zeroVal(n.Type)
		}
		p := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOTPTR:
		p := s.exprPtr(n.Left, false, n.Pos)
		p = s.newValue1I(ssa.OpOffPtr, typPtr(n.Type), n.Xoffset, p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
	case OINDEX:
		switch {
		case n.Left.Type.IsString():
			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
				// Replace "abc"[1] with 'b'.
				// Delayed until now because "abc"[1] is not an ideal constant.
				// See test/fixedbugs/issue11370.go.
				return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
			}
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i, panicindex)
			if !n.Bounded() {
				len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
				s.boundsCheck(i, len)
			}
			ptrtyp := s.f.Config.Types.BytePtr
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if Isconst(n.Right, CTINT) {
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
			} else {
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			}
			return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
		case n.Left.Type.IsSlice():
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		case n.Left.Type.IsArray():
			if bound := n.Left.Type.NumElem(); bound <= 1 {
				// SSA can handle arrays of length at most 1.
				a := s.expr(n.Left)
				i := s.expr(n.Right)
				if bound == 0 {
					// Bounds check will never succeed. Might as well
					// use constants for the bounds check.
					z := s.constInt(Types[TINT], 0)
					s.boundsCheck(z, z)
					// The return value won't be live, return junk.
					return s.newValue0(ssa.OpUnknown, n.Type)
				}
				i = s.extendIndex(i, panicindex)
				s.boundsCheck(i, s.constInt(Types[TINT], bound))
				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
			}
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		default:
			s.Fatalf("bad type for index %v", n.Left.Type)
			return nil
		}
|
2015-04-15 15:51:25 -07:00
|
|
|
|
2015-07-10 10:47:28 -06:00
|
|
|
case OLEN, OCAP:
|
2015-07-03 18:41:28 -07:00
|
|
|
switch {
|
2015-07-10 10:47:28 -06:00
|
|
|
case n.Left.Type.IsSlice():
|
|
|
|
op := ssa.OpSliceLen
|
|
|
|
if n.Op == OCAP {
|
|
|
|
op = ssa.OpSliceCap
|
|
|
|
}
|
2015-07-30 11:03:05 -07:00
|
|
|
return s.newValue1(op, Types[TINT], s.expr(n.Left))
|
2015-07-10 10:47:28 -06:00
|
|
|
case n.Left.Type.IsString(): // string; not reachable for OCAP
|
2015-07-30 11:03:05 -07:00
|
|
|
return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
|
2015-08-28 15:56:43 -05:00
|
|
|
case n.Left.Type.IsMap(), n.Left.Type.IsChan():
|
|
|
|
return s.referenceTypeBuiltin(n, s.expr(n.Left))
|
2015-07-03 18:41:28 -07:00
|
|
|
default: // array
|
2016-03-31 14:46:04 -07:00
|
|
|
return s.constInt(Types[TINT], n.Left.Type.NumElem())
|
2015-07-03 18:41:28 -07:00
|
|
|
}
|
|
|
|
|
2015-08-12 10:12:14 -07:00
|
|
|
case OSPTR:
|
|
|
|
a := s.expr(n.Left)
|
|
|
|
if n.Left.Type.IsSlice() {
|
|
|
|
return s.newValue1(ssa.OpSlicePtr, n.Type, a)
|
|
|
|
} else {
|
|
|
|
return s.newValue1(ssa.OpStringPtr, n.Type, a)
|
|
|
|
}
|
|
|
|
|
2015-08-04 15:47:22 -07:00
|
|
|
case OITAB:
|
|
|
|
a := s.expr(n.Left)
|
|
|
|
return s.newValue1(ssa.OpITab, n.Type, a)
|
|
|
|
|
2016-06-06 12:38:19 -07:00
|
|
|
case OIDATA:
|
|
|
|
a := s.expr(n.Left)
|
|
|
|
return s.newValue1(ssa.OpIData, n.Type, a)
|
|
|
|
|
2015-09-05 19:28:27 -07:00
|
|
|
case OEFACE:
|
|
|
|
tab := s.expr(n.Left)
|
|
|
|
data := s.expr(n.Right)
|
|
|
|
return s.newValue2(ssa.OpIMake, n.Type, tab, data)
|
|
|
|
|
2016-04-21 11:55:33 -07:00
|
|
|
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
|
2015-09-12 23:27:26 -07:00
|
|
|
v := s.expr(n.Left)
|
2016-04-21 11:55:33 -07:00
|
|
|
var i, j, k *ssa.Value
|
|
|
|
low, high, max := n.SliceBounds()
|
|
|
|
if low != nil {
|
2016-05-25 09:49:28 -04:00
|
|
|
i = s.extendIndex(s.expr(low), panicslice)
|
2015-09-12 23:27:26 -07:00
|
|
|
}
|
2016-04-21 11:55:33 -07:00
|
|
|
if high != nil {
|
2016-05-25 09:49:28 -04:00
|
|
|
j = s.extendIndex(s.expr(high), panicslice)
|
2015-09-12 23:27:26 -07:00
|
|
|
}
|
2016-04-21 11:55:33 -07:00
|
|
|
if max != nil {
|
2016-05-25 09:49:28 -04:00
|
|
|
k = s.extendIndex(s.expr(max), panicslice)
|
2016-04-21 11:55:33 -07:00
|
|
|
}
|
|
|
|
p, l, c := s.slice(n.Left.Type, v, i, j, k)
|
2015-09-12 23:27:26 -07:00
|
|
|
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
|
2016-04-21 11:55:33 -07:00
|
|
|
|
2015-08-24 23:52:03 -07:00
|
|
|
case OSLICESTR:
|
2015-09-12 23:27:26 -07:00
|
|
|
v := s.expr(n.Left)
|
|
|
|
var i, j *ssa.Value
|
2016-04-21 11:55:33 -07:00
|
|
|
low, high, _ := n.SliceBounds()
|
|
|
|
if low != nil {
|
2016-05-25 09:49:28 -04:00
|
|
|
i = s.extendIndex(s.expr(low), panicslice)
|
2015-08-24 23:52:03 -07:00
|
|
|
}
|
2016-04-21 11:55:33 -07:00
|
|
|
if high != nil {
|
2016-05-25 09:49:28 -04:00
|
|
|
j = s.extendIndex(s.expr(high), panicslice)
|
2015-08-24 23:52:03 -07:00
|
|
|
}
|
2015-09-12 23:27:26 -07:00
|
|
|
p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
|
|
|
|
return s.newValue2(ssa.OpStringMake, n.Type, p, l)
|
2015-08-24 23:52:03 -07:00
|
|
|
|
2016-03-11 00:10:52 -05:00
|
|
|
case OCALLFUNC:
|
2016-08-23 16:49:28 -07:00
|
|
|
if isIntrinsicCall(n) {
|
|
|
|
return s.intrinsicCall(n)
|
2016-03-11 00:10:52 -05:00
|
|
|
}
|
|
|
|
fallthrough
|
|
|
|
|
|
|
|
case OCALLINTER, OCALLMETH:
|
2016-01-25 17:06:54 -08:00
|
|
|
a := s.call(n, callNormal)
|
|
|
|
return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
|
2015-08-12 11:22:16 -07:00
|
|
|
|
|
|
|
case OGETG:
|
2015-10-19 18:54:40 -07:00
|
|
|
return s.newValue1(ssa.OpGetG, n.Type, s.mem())
|
2015-08-12 11:22:16 -07:00
|
|
|
|
2015-09-11 11:02:57 -07:00
|
|
|
case OAPPEND:
|
cmd/compile: avoid write barrier in append fast path
When we are writing the result of an append back
to the same slice, we don’t need a write barrier
on the fast path.
This re-implements an optimization that was present
in the old backend.
Updates #14921
Fixes #14969
Sample code:
var x []byte
func p() {
x = append(x, 1, 2, 3)
}
Before:
"".p t=1 size=224 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 199
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x(SB), DX
0x0025 00037 (append.go:19) MOVQ "".x+8(SB), BX
0x002c 00044 (append.go:19) MOVQ BX, "".autotmp_0+64(SP)
0x0031 00049 (append.go:22) LEAQ 3(BX), BP
0x0035 00053 (append.go:22) CMPQ BP, CX
0x0038 00056 (append.go:22) JGT $0, 131
0x003a 00058 (append.go:22) MOVB $1, (DX)(BX*1)
0x003e 00062 (append.go:22) MOVB $2, 1(DX)(BX*1)
0x0043 00067 (append.go:22) MOVB $3, 2(DX)(BX*1)
0x0048 00072 (append.go:22) MOVQ BP, "".x+8(SB)
0x004f 00079 (append.go:22) MOVQ CX, "".x+16(SB)
0x0056 00086 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x005c 00092 (append.go:22) TESTB AL, AL
0x005e 00094 (append.go:22) JNE $0, 108
0x0060 00096 (append.go:22) MOVQ DX, "".x(SB)
0x0067 00103 (append.go:23) ADDQ $72, SP
0x006b 00107 (append.go:23) RET
0x006c 00108 (append.go:22) LEAQ "".x(SB), CX
0x0073 00115 (append.go:22) MOVQ CX, (SP)
0x0077 00119 (append.go:22) MOVQ DX, 8(SP)
0x007c 00124 (append.go:22) PCDATA $0, $0
0x007c 00124 (append.go:22) CALL runtime.writebarrierptr(SB)
0x0081 00129 (append.go:23) JMP 103
0x0083 00131 (append.go:22) LEAQ type.[]uint8(SB), AX
0x008a 00138 (append.go:22) MOVQ AX, (SP)
0x008e 00142 (append.go:22) MOVQ DX, 8(SP)
0x0093 00147 (append.go:22) MOVQ BX, 16(SP)
0x0098 00152 (append.go:22) MOVQ CX, 24(SP)
0x009d 00157 (append.go:22) MOVQ BP, 32(SP)
0x00a2 00162 (append.go:22) PCDATA $0, $0
0x00a2 00162 (append.go:22) CALL runtime.growslice(SB)
0x00a7 00167 (append.go:22) MOVQ 40(SP), DX
0x00ac 00172 (append.go:22) MOVQ 48(SP), AX
0x00b1 00177 (append.go:22) MOVQ 56(SP), CX
0x00b6 00182 (append.go:22) ADDQ $3, AX
0x00ba 00186 (append.go:19) MOVQ "".autotmp_0+64(SP), BX
0x00bf 00191 (append.go:22) MOVQ AX, BP
0x00c2 00194 (append.go:22) JMP 58
0x00c7 00199 (append.go:22) NOP
0x00c7 00199 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00cc 00204 (append.go:21) JMP 0
After:
"".p t=1 size=208 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 191
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x+8(SB), DX
0x0025 00037 (append.go:19) MOVQ DX, "".autotmp_0+64(SP)
0x002a 00042 (append.go:19) MOVQ "".x(SB), BX
0x0031 00049 (append.go:22) LEAQ 3(DX), BP
0x0035 00053 (append.go:22) MOVQ BP, "".x+8(SB)
0x003c 00060 (append.go:22) CMPQ BP, CX
0x003f 00063 (append.go:22) JGT $0, 84
0x0041 00065 (append.go:22) MOVB $1, (BX)(DX*1)
0x0045 00069 (append.go:22) MOVB $2, 1(BX)(DX*1)
0x004a 00074 (append.go:22) MOVB $3, 2(BX)(DX*1)
0x004f 00079 (append.go:23) ADDQ $72, SP
0x0053 00083 (append.go:23) RET
0x0054 00084 (append.go:22) LEAQ type.[]uint8(SB), AX
0x005b 00091 (append.go:22) MOVQ AX, (SP)
0x005f 00095 (append.go:22) MOVQ BX, 8(SP)
0x0064 00100 (append.go:22) MOVQ DX, 16(SP)
0x0069 00105 (append.go:22) MOVQ CX, 24(SP)
0x006e 00110 (append.go:22) MOVQ BP, 32(SP)
0x0073 00115 (append.go:22) PCDATA $0, $0
0x0073 00115 (append.go:22) CALL runtime.growslice(SB)
0x0078 00120 (append.go:22) MOVQ 40(SP), CX
0x007d 00125 (append.go:22) MOVQ 56(SP), AX
0x0082 00130 (append.go:22) MOVQ AX, "".x+16(SB)
0x0089 00137 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x008f 00143 (append.go:22) TESTB AL, AL
0x0091 00145 (append.go:22) JNE $0, 168
0x0093 00147 (append.go:22) MOVQ CX, "".x(SB)
0x009a 00154 (append.go:22) MOVQ "".x(SB), BX
0x00a1 00161 (append.go:19) MOVQ "".autotmp_0+64(SP), DX
0x00a6 00166 (append.go:22) JMP 65
0x00a8 00168 (append.go:22) LEAQ "".x(SB), DX
0x00af 00175 (append.go:22) MOVQ DX, (SP)
0x00b3 00179 (append.go:22) MOVQ CX, 8(SP)
0x00b8 00184 (append.go:22) PCDATA $0, $0
0x00b8 00184 (append.go:22) CALL runtime.writebarrierptr(SB)
0x00bd 00189 (append.go:22) JMP 154
0x00bf 00191 (append.go:22) NOP
0x00bf 00191 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00c4 00196 (append.go:21) JMP 0
Change-Id: I77a41ad3a22557a4bb4654de7d6d24a029efe34a
Reviewed-on: https://go-review.googlesource.com/21813
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-04-10 09:44:17 -07:00
|
|
|
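		// Expression context needs the resulting slice as a value, so this is
		// the inplace=false form described in the append helper below.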
return s.append(n, false)
|
2015-09-11 11:02:57 -07:00
|
|
|
|
2017-01-16 12:37:11 -05:00
|
|
|
case OSTRUCTLIT, OARRAYLIT:
|
|
|
|
// All literals with nonzero fields have already been
|
|
|
|
// rewritten during walk. Any that remain are just T{}
|
|
|
|
// or equivalents. Use the zero value.
|
|
|
|
if !iszero(n) {
|
|
|
|
Fatalf("literal with nonzero value in SSA: %v", n)
|
|
|
|
}
|
|
|
|
return s.zeroVal(n.Type)
|
|
|
|
|
2016-04-04 10:58:21 -07:00
|
|
|
default:
|
2016-09-14 10:01:05 -07:00
|
|
|
s.Fatalf("unhandled expr %v", n.Op)
|
2016-04-04 10:58:21 -07:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2015-09-11 11:02:57 -07:00
|
|
|
|
2016-04-10 09:44:17 -07:00
|
|
|
// append converts an OAPPEND node to SSA.
|
|
|
|
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
|
|
|
|
// adds it to s, and returns the Value.
|
|
|
|
// If inplace is true, it writes the result of the OAPPEND expression n
|
|
|
|
// back to the slice being appended to, and returns nil.
|
|
|
|
// inplace MUST be set to false if the slice can be SSA'd.
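//
// Illustrative caller shapes (hypothetical examples, not taken from this file):
//
//	y := append(x, e) // expression context: inplace=false, result is an ssa.Value
//	x = append(x, e)  // assignment back to x: inplace may be true when x cannot be SSA'd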
|
|
|
|
func (s *state) append(n *Node, inplace bool) *ssa.Value {
|
|
|
|
// If inplace is false, process as expression "append(s, e1, e2, e3)":
|
|
|
|
//
|
cmd/compile: avoid a spill in append fast path
Instead of spilling newlen, recalculate it.
This removes a spill from the fast path,
at the cost of a cheap recalculation
on the (rare) growth path.
This uses 8 bytes less of stack space.
It generates two more bytes of code,
but that is due to suboptimal register allocation;
see far below.
Runtime append microbenchmarks are all over the map,
presumably due to incidental code movement.
Sample code:
func s(b []byte) []byte {
b = append(b, 1, 2, 3)
return b
}
Before:
"".s t=1 size=160 args=0x30 locals=0x48
0x0000 00000 (append.go:8) TEXT "".s(SB), $72-48
0x0000 00000 (append.go:8) MOVQ (TLS), CX
0x0009 00009 (append.go:8) CMPQ SP, 16(CX)
0x000d 00013 (append.go:8) JLS 149
0x0013 00019 (append.go:8) SUBQ $72, SP
0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB)
0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:9) MOVQ "".b+88(FP), CX
0x001c 00028 (append.go:9) LEAQ 3(CX), DX
0x0020 00032 (append.go:9) MOVQ DX, "".autotmp_0+64(SP)
0x0025 00037 (append.go:9) MOVQ "".b+96(FP), BX
0x002a 00042 (append.go:9) CMPQ DX, BX
0x002d 00045 (append.go:9) JGT $0, 86
0x002f 00047 (append.go:8) MOVQ "".b+80(FP), AX
0x0034 00052 (append.go:9) MOVB $1, (AX)(CX*1)
0x0038 00056 (append.go:9) MOVB $2, 1(AX)(CX*1)
0x003d 00061 (append.go:9) MOVB $3, 2(AX)(CX*1)
0x0042 00066 (append.go:10) MOVQ AX, "".~r1+104(FP)
0x0047 00071 (append.go:10) MOVQ DX, "".~r1+112(FP)
0x004c 00076 (append.go:10) MOVQ BX, "".~r1+120(FP)
0x0051 00081 (append.go:10) ADDQ $72, SP
0x0055 00085 (append.go:10) RET
0x0056 00086 (append.go:9) LEAQ type.[]uint8(SB), AX
0x005d 00093 (append.go:9) MOVQ AX, (SP)
0x0061 00097 (append.go:9) MOVQ "".b+80(FP), BP
0x0066 00102 (append.go:9) MOVQ BP, 8(SP)
0x006b 00107 (append.go:9) MOVQ CX, 16(SP)
0x0070 00112 (append.go:9) MOVQ BX, 24(SP)
0x0075 00117 (append.go:9) MOVQ DX, 32(SP)
0x007a 00122 (append.go:9) PCDATA $0, $0
0x007a 00122 (append.go:9) CALL runtime.growslice(SB)
0x007f 00127 (append.go:9) MOVQ 40(SP), AX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:8) MOVQ "".b+88(FP), CX
0x008e 00142 (append.go:9) MOVQ "".autotmp_0+64(SP), DX
0x0093 00147 (append.go:9) JMP 52
0x0095 00149 (append.go:9) NOP
0x0095 00149 (append.go:8) CALL runtime.morestack_noctxt(SB)
0x009a 00154 (append.go:8) JMP 0
After:
"".s t=1 size=176 args=0x30 locals=0x40
0x0000 00000 (append.go:8) TEXT "".s(SB), $64-48
0x0000 00000 (append.go:8) MOVQ (TLS), CX
0x0009 00009 (append.go:8) CMPQ SP, 16(CX)
0x000d 00013 (append.go:8) JLS 151
0x0013 00019 (append.go:8) SUBQ $64, SP
0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB)
0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:9) MOVQ "".b+80(FP), CX
0x001c 00028 (append.go:9) LEAQ 3(CX), DX
0x0020 00032 (append.go:9) MOVQ "".b+88(FP), BX
0x0025 00037 (append.go:9) CMPQ DX, BX
0x0028 00040 (append.go:9) JGT $0, 81
0x002a 00042 (append.go:8) MOVQ "".b+72(FP), AX
0x002f 00047 (append.go:9) MOVB $1, (AX)(CX*1)
0x0033 00051 (append.go:9) MOVB $2, 1(AX)(CX*1)
0x0038 00056 (append.go:9) MOVB $3, 2(AX)(CX*1)
0x003d 00061 (append.go:10) MOVQ AX, "".~r1+96(FP)
0x0042 00066 (append.go:10) MOVQ DX, "".~r1+104(FP)
0x0047 00071 (append.go:10) MOVQ BX, "".~r1+112(FP)
0x004c 00076 (append.go:10) ADDQ $64, SP
0x0050 00080 (append.go:10) RET
0x0051 00081 (append.go:9) LEAQ type.[]uint8(SB), AX
0x0058 00088 (append.go:9) MOVQ AX, (SP)
0x005c 00092 (append.go:9) MOVQ "".b+72(FP), BP
0x0061 00097 (append.go:9) MOVQ BP, 8(SP)
0x0066 00102 (append.go:9) MOVQ CX, 16(SP)
0x006b 00107 (append.go:9) MOVQ BX, 24(SP)
0x0070 00112 (append.go:9) MOVQ DX, 32(SP)
0x0075 00117 (append.go:9) PCDATA $0, $0
0x0075 00117 (append.go:9) CALL runtime.growslice(SB)
0x007a 00122 (append.go:9) MOVQ 40(SP), AX
0x007f 00127 (append.go:9) MOVQ 48(SP), CX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:9) ADDQ $3, CX
0x008d 00141 (append.go:9) MOVQ CX, DX
0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX
0x0095 00149 (append.go:9) JMP 47
0x0097 00151 (append.go:9) NOP
0x0097 00151 (append.go:8) CALL runtime.morestack_noctxt(SB)
0x009c 00156 (append.go:8) JMP 0
Observe that in the following sequence,
we should use DX directly instead of using
CX as a temporary register, which would make
the new code a strict improvement on the old:
0x007f 00127 (append.go:9) MOVQ 48(SP), CX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:9) ADDQ $3, CX
0x008d 00141 (append.go:9) MOVQ CX, DX
0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX
Change-Id: I4ee50b18fa53865901d2d7f86c2cbb54c6fa6924
Reviewed-on: https://go-review.googlesource.com/21812
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-04-10 09:08:00 -07:00
|
|
|
// ptr, len, cap := s
|
2016-04-04 10:58:21 -07:00
|
|
|
// newlen := len + 3
|
2016-04-10 09:44:17 -07:00
|
|
|
// if newlen > cap {
|
2016-04-10 09:08:00 -07:00
|
|
|
// ptr, len, cap = growslice(s, newlen)
|
|
|
|
// newlen = len + 3 // recalculate to avoid a spill
|
2016-04-04 10:58:21 -07:00
|
|
|
// }
|
2016-04-10 09:44:17 -07:00
|
|
|
// // with write barriers, if needed:
|
|
|
|
// *(ptr+len) = e1
|
|
|
|
// *(ptr+len+1) = e2
|
|
|
|
// *(ptr+len+2) = e3
|
|
|
|
// return makeslice(ptr, newlen, cap)
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
|
|
|
|
//
|
|
|
|
// a := &s
|
|
|
|
// ptr, len, cap := s
|
|
|
|
// newlen := len + 3
|
|
|
|
// if newlen > cap {
|
cmd/compile: re-enable in-place append optimization
CL 21891 was too clever in its attempts to avoid spills.
Storing newlen too early caused uses of append in the runtime
itself to receive an inconsistent view of a slice,
leading to corruption.
This CL makes the generated code much more similar to
the old backend. It spills more than before,
but those spills have been contained to the grow path.
It recalculates newlen unnecessarily on the fast path,
but that's measurably cheaper than spilling it.
CL 21891 caused runtime failures in 6 of 2000 runs
of net/http and crypto/x509 in my test setup.
This CL has gone 6000 runs without a failure.
Benchmarks going from master to this CL:
name old time/op new time/op delta
AppendInPlace/NoGrow/Byte-8 439ns ± 2% 436ns ± 2% -0.72% (p=0.001 n=28+27)
AppendInPlace/NoGrow/1Ptr-8 901ns ± 0% 856ns ± 0% -4.95% (p=0.000 n=26+29)
AppendInPlace/NoGrow/2Ptr-8 2.15µs ± 1% 1.95µs ± 0% -9.07% (p=0.000 n=28+30)
AppendInPlace/NoGrow/3Ptr-8 2.66µs ± 0% 2.45µs ± 0% -7.93% (p=0.000 n=29+26)
AppendInPlace/NoGrow/4Ptr-8 3.24µs ± 1% 3.02µs ± 1% -6.75% (p=0.000 n=28+30)
AppendInPlace/Grow/Byte-8 269ns ± 1% 271ns ± 1% +0.84% (p=0.000 n=30+29)
AppendInPlace/Grow/1Ptr-8 275ns ± 1% 280ns ± 1% +1.75% (p=0.000 n=30+30)
AppendInPlace/Grow/2Ptr-8 384ns ± 0% 391ns ± 0% +1.94% (p=0.000 n=27+30)
AppendInPlace/Grow/3Ptr-8 455ns ± 0% 462ns ± 0% +1.43% (p=0.000 n=29+29)
AppendInPlace/Grow/4Ptr-8 478ns ± 0% 479ns ± 0% +0.23% (p=0.000 n=30+27)
However, for the large no-grow cases, there is still more work to be done.
Going from this CL to the non-SSA backend:
name old time/op new time/op delta
AppendInPlace/NoGrow/Byte-8 436ns ± 2% 436ns ± 2% ~ (p=0.967 n=27+29)
AppendInPlace/NoGrow/1Ptr-8 856ns ± 0% 884ns ± 0% +3.28% (p=0.000 n=29+26)
AppendInPlace/NoGrow/2Ptr-8 1.95µs ± 0% 1.56µs ± 0% -20.28% (p=0.000 n=30+29)
AppendInPlace/NoGrow/3Ptr-8 2.45µs ± 0% 1.89µs ± 0% -22.88% (p=0.000 n=26+28)
AppendInPlace/NoGrow/4Ptr-8 3.02µs ± 1% 2.56µs ± 1% -15.35% (p=0.000 n=30+28)
AppendInPlace/Grow/Byte-8 271ns ± 1% 283ns ± 1% +4.56% (p=0.000 n=29+29)
AppendInPlace/Grow/1Ptr-8 280ns ± 1% 288ns ± 1% +2.99% (p=0.000 n=30+30)
AppendInPlace/Grow/2Ptr-8 391ns ± 0% 409ns ± 0% +4.66% (p=0.000 n=30+29)
AppendInPlace/Grow/3Ptr-8 462ns ± 0% 481ns ± 0% +4.13% (p=0.000 n=29+30)
AppendInPlace/Grow/4Ptr-8 479ns ± 0% 502ns ± 0% +4.81% (p=0.000 n=27+26)
New generated code:
var x []byte
func a() {
x = append(x, 1)
}
"".a t=1 size=208 args=0x0 locals=0x48
0x0000 00000 (a.go:5) TEXT "".a(SB), $72-0
0x0000 00000 (a.go:5) MOVQ (TLS), CX
0x0009 00009 (a.go:5) CMPQ SP, 16(CX)
0x000d 00013 (a.go:5) JLS 190
0x0013 00019 (a.go:5) SUBQ $72, SP
0x0017 00023 (a.go:5) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (a.go:5) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (a.go:6) MOVQ "".x+16(SB), CX
0x001e 00030 (a.go:6) MOVQ "".x+8(SB), DX
0x0025 00037 (a.go:6) MOVQ "".x(SB), BX
0x002c 00044 (a.go:6) LEAQ 1(DX), BP
0x0030 00048 (a.go:6) CMPQ BP, CX
0x0033 00051 (a.go:6) JGT $0, 73
0x0035 00053 (a.go:6) LEAQ 1(DX), AX
0x0039 00057 (a.go:6) MOVQ AX, "".x+8(SB)
0x0040 00064 (a.go:6) MOVB $1, (BX)(DX*1)
0x0044 00068 (a.go:7) ADDQ $72, SP
0x0048 00072 (a.go:7) RET
0x0049 00073 (a.go:6) LEAQ type.[]uint8(SB), AX
0x0050 00080 (a.go:6) MOVQ AX, (SP)
0x0054 00084 (a.go:6) MOVQ BX, 8(SP)
0x0059 00089 (a.go:6) MOVQ DX, 16(SP)
0x005e 00094 (a.go:6) MOVQ CX, 24(SP)
0x0063 00099 (a.go:6) MOVQ BP, 32(SP)
0x0068 00104 (a.go:6) PCDATA $0, $0
0x0068 00104 (a.go:6) CALL runtime.growslice(SB)
0x006d 00109 (a.go:6) MOVQ 40(SP), CX
0x0072 00114 (a.go:6) MOVQ 48(SP), DX
0x0077 00119 (a.go:6) MOVQ DX, "".autotmp_0+64(SP)
0x007c 00124 (a.go:6) MOVQ 56(SP), BX
0x0081 00129 (a.go:6) MOVQ BX, "".x+16(SB)
0x0088 00136 (a.go:6) MOVL runtime.writeBarrier(SB), AX
0x008e 00142 (a.go:6) TESTB AL, AL
0x0090 00144 (a.go:6) JNE $0, 162
0x0092 00146 (a.go:6) MOVQ CX, "".x(SB)
0x0099 00153 (a.go:6) MOVQ "".x(SB), BX
0x00a0 00160 (a.go:6) JMP 53
0x00a2 00162 (a.go:6) LEAQ "".x(SB), BX
0x00a9 00169 (a.go:6) MOVQ BX, (SP)
0x00ad 00173 (a.go:6) MOVQ CX, 8(SP)
0x00b2 00178 (a.go:6) PCDATA $0, $0
0x00b2 00178 (a.go:6) CALL runtime.writebarrierptr(SB)
0x00b7 00183 (a.go:6) MOVQ "".autotmp_0+64(SP), DX
0x00bc 00188 (a.go:6) JMP 153
0x00be 00190 (a.go:6) NOP
0x00be 00190 (a.go:5) CALL runtime.morestack_noctxt(SB)
0x00c3 00195 (a.go:5) JMP 0
Fixes #14969 again
Change-Id: Ia50463b1f506011aad0718a4fef1d4738e43c32d
Reviewed-on: https://go-review.googlesource.com/22197
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-04-18 09:40:30 -07:00
|
|
|
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
|
|
|
|
// vardef(a) // if necessary, advise liveness we are writing a new a
|
2016-04-10 09:44:17 -07:00
|
|
|
// *a.cap = newcap // write before ptr to avoid a spill
|
|
|
|
// *a.ptr = newptr // with write barrier
|
|
|
|
// }
|
2016-04-18 09:40:30 -07:00
|
|
|
// newlen = len + 3 // recalculate to avoid a spill
|
|
|
|
// *a.len = newlen
|
2016-04-10 09:44:17 -07:00
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3

et := n.Type.Elem()
pt := typPtr(et)

// Evaluate slice
sn := n.List.First() // the slice node is the first in the list

var slice, addr *ssa.Value
if inplace {
	addr = s.addr(sn, false)
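	// addr is the address of the slice variable itself; loading through it lets the
	// grow path store the result back in place.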
	slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
} else {
	slice = s.expr(sn)
}

// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)

// Decide if we need to grow
nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
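// nargs is the number of elements being appended; nl = l + nargs is the length
// the slice will have once the append is done.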
cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
s.vars[&ptrVar] = p
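// cmp is true when nl exceeds the current capacity, i.e. when growslice must be called.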
if !inplace {
	s.vars[&newlenVar] = nl
	s.vars[&capVar] = c
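	// Values recorded in s.vars here are merged with the grow path's values
	// when control reaches the assign block below.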
} else {
	s.vars[&lenVar] = l
}

b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)

// Call growslice
s.startBlock(grow)
sym := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: Linksym(typenamesym(n.Type.Elem()))})
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], sym, s.sb)

r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
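// growslice returns the grown slice; r[0], r[1], and r[2] are its pointer, length,
// and capacity respectively.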
if inplace {
	if sn.Op == ONAME {
		// Tell liveness we're about to build a new slice
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
	}
	capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
	s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capaddr, r[2], s.mem())
	s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem())
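	// On the grow path, write the new capacity and backing pointer straight back into
	// the slice variable; the pointer store is the one that may need a write barrier.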
	// load the value we just stored to avoid having to spill it
	s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
	s.vars[&lenVar] = r[1] // avoid a spill in the fast path
cmd/compile: avoid write barrier in append fast path
When we are writing the result of an append back
to the same slice, we don’t need a write barrier
on the fast path.
This re-implements an optimization that was present
in the old backend.
Updates #14921
Fixes #14969
Sample code:
var x []byte
func p() {
x = append(x, 1, 2, 3)
}
Before:
"".p t=1 size=224 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 199
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x(SB), DX
0x0025 00037 (append.go:19) MOVQ "".x+8(SB), BX
0x002c 00044 (append.go:19) MOVQ BX, "".autotmp_0+64(SP)
0x0031 00049 (append.go:22) LEAQ 3(BX), BP
0x0035 00053 (append.go:22) CMPQ BP, CX
0x0038 00056 (append.go:22) JGT $0, 131
0x003a 00058 (append.go:22) MOVB $1, (DX)(BX*1)
0x003e 00062 (append.go:22) MOVB $2, 1(DX)(BX*1)
0x0043 00067 (append.go:22) MOVB $3, 2(DX)(BX*1)
0x0048 00072 (append.go:22) MOVQ BP, "".x+8(SB)
0x004f 00079 (append.go:22) MOVQ CX, "".x+16(SB)
0x0056 00086 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x005c 00092 (append.go:22) TESTB AL, AL
0x005e 00094 (append.go:22) JNE $0, 108
0x0060 00096 (append.go:22) MOVQ DX, "".x(SB)
0x0067 00103 (append.go:23) ADDQ $72, SP
0x006b 00107 (append.go:23) RET
0x006c 00108 (append.go:22) LEAQ "".x(SB), CX
0x0073 00115 (append.go:22) MOVQ CX, (SP)
0x0077 00119 (append.go:22) MOVQ DX, 8(SP)
0x007c 00124 (append.go:22) PCDATA $0, $0
0x007c 00124 (append.go:22) CALL runtime.writebarrierptr(SB)
0x0081 00129 (append.go:23) JMP 103
0x0083 00131 (append.go:22) LEAQ type.[]uint8(SB), AX
0x008a 00138 (append.go:22) MOVQ AX, (SP)
0x008e 00142 (append.go:22) MOVQ DX, 8(SP)
0x0093 00147 (append.go:22) MOVQ BX, 16(SP)
0x0098 00152 (append.go:22) MOVQ CX, 24(SP)
0x009d 00157 (append.go:22) MOVQ BP, 32(SP)
0x00a2 00162 (append.go:22) PCDATA $0, $0
0x00a2 00162 (append.go:22) CALL runtime.growslice(SB)
0x00a7 00167 (append.go:22) MOVQ 40(SP), DX
0x00ac 00172 (append.go:22) MOVQ 48(SP), AX
0x00b1 00177 (append.go:22) MOVQ 56(SP), CX
0x00b6 00182 (append.go:22) ADDQ $3, AX
0x00ba 00186 (append.go:19) MOVQ "".autotmp_0+64(SP), BX
0x00bf 00191 (append.go:22) MOVQ AX, BP
0x00c2 00194 (append.go:22) JMP 58
0x00c7 00199 (append.go:22) NOP
0x00c7 00199 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00cc 00204 (append.go:21) JMP 0
After:
"".p t=1 size=208 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 191
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x+8(SB), DX
0x0025 00037 (append.go:19) MOVQ DX, "".autotmp_0+64(SP)
0x002a 00042 (append.go:19) MOVQ "".x(SB), BX
0x0031 00049 (append.go:22) LEAQ 3(DX), BP
0x0035 00053 (append.go:22) MOVQ BP, "".x+8(SB)
0x003c 00060 (append.go:22) CMPQ BP, CX
0x003f 00063 (append.go:22) JGT $0, 84
0x0041 00065 (append.go:22) MOVB $1, (BX)(DX*1)
0x0045 00069 (append.go:22) MOVB $2, 1(BX)(DX*1)
0x004a 00074 (append.go:22) MOVB $3, 2(BX)(DX*1)
0x004f 00079 (append.go:23) ADDQ $72, SP
0x0053 00083 (append.go:23) RET
0x0054 00084 (append.go:22) LEAQ type.[]uint8(SB), AX
0x005b 00091 (append.go:22) MOVQ AX, (SP)
0x005f 00095 (append.go:22) MOVQ BX, 8(SP)
0x0064 00100 (append.go:22) MOVQ DX, 16(SP)
0x0069 00105 (append.go:22) MOVQ CX, 24(SP)
0x006e 00110 (append.go:22) MOVQ BP, 32(SP)
0x0073 00115 (append.go:22) PCDATA $0, $0
0x0073 00115 (append.go:22) CALL runtime.growslice(SB)
0x0078 00120 (append.go:22) MOVQ 40(SP), CX
0x007d 00125 (append.go:22) MOVQ 56(SP), AX
0x0082 00130 (append.go:22) MOVQ AX, "".x+16(SB)
0x0089 00137 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x008f 00143 (append.go:22) TESTB AL, AL
0x0091 00145 (append.go:22) JNE $0, 168
0x0093 00147 (append.go:22) MOVQ CX, "".x(SB)
0x009a 00154 (append.go:22) MOVQ "".x(SB), BX
0x00a1 00161 (append.go:19) MOVQ "".autotmp_0+64(SP), DX
0x00a6 00166 (append.go:22) JMP 65
0x00a8 00168 (append.go:22) LEAQ "".x(SB), DX
0x00af 00175 (append.go:22) MOVQ DX, (SP)
0x00b3 00179 (append.go:22) MOVQ CX, 8(SP)
0x00b8 00184 (append.go:22) PCDATA $0, $0
0x00b8 00184 (append.go:22) CALL runtime.writebarrierptr(SB)
0x00bd 00189 (append.go:22) JMP 154
0x00bf 00191 (append.go:22) NOP
0x00bf 00191 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00c4 00196 (append.go:21) JMP 0
Change-Id: I77a41ad3a22557a4bb4654de7d6d24a029efe34a
Reviewed-on: https://go-review.googlesource.com/21813
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
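A minimal sketch of which forms qualify (not from the CL; names are illustrative): the optimization applies only when append's result is assigned back to the same slice it reads from, so on the no-grow path the pointer being stored is the one already there.
var x, y []byte
func inPlace(b byte) {
	x = append(x, b) // in-place form: fast path can skip the pointer write barrier
}
func notInPlace(b byte) {
	y = append(x, b) // different destination: stored through the normal path
}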
2016-04-10 09:44:17 -07:00
|
|
|
} else {
|
|
|
|
s.vars[&ptrVar] = r[0]
|
|
|
|
s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
|
|
|
|
s.vars[&capVar] = r[2]
|
|
|
|
}
|
|
|
|
|
2016-04-04 10:58:21 -07:00
|
|
|
b = s.endBlock()
|
|
|
|
b.AddEdgeTo(assign)
|
|
|
|
|
|
|
|
// assign new elements to slots
|
|
|
|
s.startBlock(assign)
|
|
|
|
|
cmd/compile: re-enable in-place append optimization
CL 21891 was too clever in its attempts to avoid spills.
Storing newlen too early caused uses of append in the runtime
itself to receive an inconsistent view of a slice,
leading to corruption.
This CL makes the generated code much more similar to
the old backend. It spills more than before,
but those spills have been contained to the grow path.
It recalculates newlen unnecessarily on the fast path,
but that's measurably cheaper than spilling it.
CL 21891 caused runtime failures in 6 of 2000 runs
of net/http and crypto/x509 in my test setup.
This CL has gone 6000 runs without a failure.
Benchmarks going from master to this CL:
name old time/op new time/op delta
AppendInPlace/NoGrow/Byte-8 439ns ± 2% 436ns ± 2% -0.72% (p=0.001 n=28+27)
AppendInPlace/NoGrow/1Ptr-8 901ns ± 0% 856ns ± 0% -4.95% (p=0.000 n=26+29)
AppendInPlace/NoGrow/2Ptr-8 2.15µs ± 1% 1.95µs ± 0% -9.07% (p=0.000 n=28+30)
AppendInPlace/NoGrow/3Ptr-8 2.66µs ± 0% 2.45µs ± 0% -7.93% (p=0.000 n=29+26)
AppendInPlace/NoGrow/4Ptr-8 3.24µs ± 1% 3.02µs ± 1% -6.75% (p=0.000 n=28+30)
AppendInPlace/Grow/Byte-8 269ns ± 1% 271ns ± 1% +0.84% (p=0.000 n=30+29)
AppendInPlace/Grow/1Ptr-8 275ns ± 1% 280ns ± 1% +1.75% (p=0.000 n=30+30)
AppendInPlace/Grow/2Ptr-8 384ns ± 0% 391ns ± 0% +1.94% (p=0.000 n=27+30)
AppendInPlace/Grow/3Ptr-8 455ns ± 0% 462ns ± 0% +1.43% (p=0.000 n=29+29)
AppendInPlace/Grow/4Ptr-8 478ns ± 0% 479ns ± 0% +0.23% (p=0.000 n=30+27)
However, for the large no-grow cases, there is still more work to be done.
Going from this CL to the non-SSA backend:
name old time/op new time/op delta
AppendInPlace/NoGrow/Byte-8 436ns ± 2% 436ns ± 2% ~ (p=0.967 n=27+29)
AppendInPlace/NoGrow/1Ptr-8 856ns ± 0% 884ns ± 0% +3.28% (p=0.000 n=29+26)
AppendInPlace/NoGrow/2Ptr-8 1.95µs ± 0% 1.56µs ± 0% -20.28% (p=0.000 n=30+29)
AppendInPlace/NoGrow/3Ptr-8 2.45µs ± 0% 1.89µs ± 0% -22.88% (p=0.000 n=26+28)
AppendInPlace/NoGrow/4Ptr-8 3.02µs ± 1% 2.56µs ± 1% -15.35% (p=0.000 n=30+28)
AppendInPlace/Grow/Byte-8 271ns ± 1% 283ns ± 1% +4.56% (p=0.000 n=29+29)
AppendInPlace/Grow/1Ptr-8 280ns ± 1% 288ns ± 1% +2.99% (p=0.000 n=30+30)
AppendInPlace/Grow/2Ptr-8 391ns ± 0% 409ns ± 0% +4.66% (p=0.000 n=30+29)
AppendInPlace/Grow/3Ptr-8 462ns ± 0% 481ns ± 0% +4.13% (p=0.000 n=29+30)
AppendInPlace/Grow/4Ptr-8 479ns ± 0% 502ns ± 0% +4.81% (p=0.000 n=27+26)
New generated code:
var x []byte
func a() {
x = append(x, 1)
}
"".a t=1 size=208 args=0x0 locals=0x48
0x0000 00000 (a.go:5) TEXT "".a(SB), $72-0
0x0000 00000 (a.go:5) MOVQ (TLS), CX
0x0009 00009 (a.go:5) CMPQ SP, 16(CX)
0x000d 00013 (a.go:5) JLS 190
0x0013 00019 (a.go:5) SUBQ $72, SP
0x0017 00023 (a.go:5) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (a.go:5) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (a.go:6) MOVQ "".x+16(SB), CX
0x001e 00030 (a.go:6) MOVQ "".x+8(SB), DX
0x0025 00037 (a.go:6) MOVQ "".x(SB), BX
0x002c 00044 (a.go:6) LEAQ 1(DX), BP
0x0030 00048 (a.go:6) CMPQ BP, CX
0x0033 00051 (a.go:6) JGT $0, 73
0x0035 00053 (a.go:6) LEAQ 1(DX), AX
0x0039 00057 (a.go:6) MOVQ AX, "".x+8(SB)
0x0040 00064 (a.go:6) MOVB $1, (BX)(DX*1)
0x0044 00068 (a.go:7) ADDQ $72, SP
0x0048 00072 (a.go:7) RET
0x0049 00073 (a.go:6) LEAQ type.[]uint8(SB), AX
0x0050 00080 (a.go:6) MOVQ AX, (SP)
0x0054 00084 (a.go:6) MOVQ BX, 8(SP)
0x0059 00089 (a.go:6) MOVQ DX, 16(SP)
0x005e 00094 (a.go:6) MOVQ CX, 24(SP)
0x0063 00099 (a.go:6) MOVQ BP, 32(SP)
0x0068 00104 (a.go:6) PCDATA $0, $0
0x0068 00104 (a.go:6) CALL runtime.growslice(SB)
0x006d 00109 (a.go:6) MOVQ 40(SP), CX
0x0072 00114 (a.go:6) MOVQ 48(SP), DX
0x0077 00119 (a.go:6) MOVQ DX, "".autotmp_0+64(SP)
0x007c 00124 (a.go:6) MOVQ 56(SP), BX
0x0081 00129 (a.go:6) MOVQ BX, "".x+16(SB)
0x0088 00136 (a.go:6) MOVL runtime.writeBarrier(SB), AX
0x008e 00142 (a.go:6) TESTB AL, AL
0x0090 00144 (a.go:6) JNE $0, 162
0x0092 00146 (a.go:6) MOVQ CX, "".x(SB)
0x0099 00153 (a.go:6) MOVQ "".x(SB), BX
0x00a0 00160 (a.go:6) JMP 53
0x00a2 00162 (a.go:6) LEAQ "".x(SB), BX
0x00a9 00169 (a.go:6) MOVQ BX, (SP)
0x00ad 00173 (a.go:6) MOVQ CX, 8(SP)
0x00b2 00178 (a.go:6) PCDATA $0, $0
0x00b2 00178 (a.go:6) CALL runtime.writebarrierptr(SB)
0x00b7 00183 (a.go:6) MOVQ "".autotmp_0+64(SP), DX
0x00bc 00188 (a.go:6) JMP 153
0x00be 00190 (a.go:6) NOP
0x00be 00190 (a.go:5) CALL runtime.morestack_noctxt(SB)
0x00c3 00195 (a.go:5) JMP 0
Fixes #14969 again
Change-Id: Ia50463b1f506011aad0718a4fef1d4738e43c32d
Reviewed-on: https://go-review.googlesource.com/22197
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
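The AppendInPlace numbers above presumably come from benchmarks of roughly this shape (a sketch under that assumption, not the exact source):
package append_test
import "testing"
var sink []byte
func BenchmarkAppendInPlaceNoGrowByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = sink[:0] // keep the backing array so later iterations never grow
		for j := 0; j < 1024; j++ {
			sink = append(sink, byte(j))
		}
	}
}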
2016-04-18 09:40:30 -07:00
|
|
|
if inplace {
|
|
|
|
l = s.variable(&lenVar, Types[TINT]) // generates phi for len
|
|
|
|
nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
|
2017-03-18 10:16:03 -07:00
|
|
|
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
|
2017-03-13 21:51:08 -04:00
|
|
|
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenaddr, nl, s.mem())
|
2016-04-18 09:40:30 -07:00
|
|
|
}
|
|
|
|
|
2016-04-04 10:58:21 -07:00
|
|
|
// Evaluate args
|
2016-05-27 14:07:37 -07:00
|
|
|
type argRec struct {
|
|
|
|
// if store is true, we're appending the value v. If false, we're appending the
|
2017-02-02 19:47:59 -05:00
|
|
|
// value at *v.
|
|
|
|
v *ssa.Value
|
|
|
|
store bool
|
2016-05-27 14:07:37 -07:00
|
|
|
}
|
|
|
|
args := make([]argRec, 0, nargs)
|
2016-04-04 10:58:21 -07:00
|
|
|
for _, n := range n.List.Slice()[1:] {
|
|
|
|
if canSSAType(n.Type) {
|
2016-05-27 14:07:37 -07:00
|
|
|
args = append(args, argRec{v: s.expr(n), store: true})
|
2016-04-04 10:58:21 -07:00
|
|
|
} else {
|
2017-02-02 19:47:59 -05:00
|
|
|
v := s.addr(n, false)
|
|
|
|
args = append(args, argRec{v: v})
|
2016-04-04 10:58:21 -07:00
|
|
|
}
|
|
|
|
}
|
2015-09-18 15:11:30 -07:00
|
|
|
|
2016-04-10 09:44:17 -07:00
|
|
|
p = s.variable(&ptrVar, pt) // generates phi for ptr
|
|
|
|
if !inplace {
|
|
|
|
nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
|
|
|
|
c = s.variable(&capVar, Types[TINT]) // generates phi for cap
|
|
|
|
}
|
2016-04-04 10:58:21 -07:00
|
|
|
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
|
|
|
|
for i, arg := range args {
|
|
|
|
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
|
2016-05-27 14:07:37 -07:00
|
|
|
if arg.store {
|
2017-02-10 10:15:10 -05:00
|
|
|
s.storeType(et, addr, arg.v, 0)
|
2016-04-04 10:58:21 -07:00
|
|
|
} else {
|
2017-03-13 21:51:08 -04:00
|
|
|
store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
|
2017-02-10 10:15:10 -05:00
|
|
|
store.Aux = et
|
|
|
|
s.vars[&memVar] = store
|
2015-09-11 11:02:57 -07:00
|
|
|
}
|
2015-04-15 15:51:25 -07:00
|
|
|
}
|
2016-04-04 10:58:21 -07:00
|
|
|
|
|
|
|
delete(s.vars, &ptrVar)
|
2016-04-10 09:44:17 -07:00
|
|
|
if inplace {
|
2016-04-18 09:40:30 -07:00
|
|
|
delete(s.vars, &lenVar)
|
2016-04-10 09:44:17 -07:00
|
|
|
return nil
|
|
|
|
}
|
cmd/compile: avoid a spill in append fast path
Instead of spilling newlen, recalculate it.
This removes a spill from the fast path,
at the cost of a cheap recalculation
on the (rare) growth path.
This uses 8 bytes less of stack space.
It generates two more bytes of code,
but that is due to suboptimal register allocation;
see far below.
Runtime append microbenchmarks are all over the map,
presumably due to incidental code movement.
Sample code:
func s(b []byte) []byte {
b = append(b, 1, 2, 3)
return b
}
Before:
"".s t=1 size=160 args=0x30 locals=0x48
0x0000 00000 (append.go:8) TEXT "".s(SB), $72-48
0x0000 00000 (append.go:8) MOVQ (TLS), CX
0x0009 00009 (append.go:8) CMPQ SP, 16(CX)
0x000d 00013 (append.go:8) JLS 149
0x0013 00019 (append.go:8) SUBQ $72, SP
0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB)
0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:9) MOVQ "".b+88(FP), CX
0x001c 00028 (append.go:9) LEAQ 3(CX), DX
0x0020 00032 (append.go:9) MOVQ DX, "".autotmp_0+64(SP)
0x0025 00037 (append.go:9) MOVQ "".b+96(FP), BX
0x002a 00042 (append.go:9) CMPQ DX, BX
0x002d 00045 (append.go:9) JGT $0, 86
0x002f 00047 (append.go:8) MOVQ "".b+80(FP), AX
0x0034 00052 (append.go:9) MOVB $1, (AX)(CX*1)
0x0038 00056 (append.go:9) MOVB $2, 1(AX)(CX*1)
0x003d 00061 (append.go:9) MOVB $3, 2(AX)(CX*1)
0x0042 00066 (append.go:10) MOVQ AX, "".~r1+104(FP)
0x0047 00071 (append.go:10) MOVQ DX, "".~r1+112(FP)
0x004c 00076 (append.go:10) MOVQ BX, "".~r1+120(FP)
0x0051 00081 (append.go:10) ADDQ $72, SP
0x0055 00085 (append.go:10) RET
0x0056 00086 (append.go:9) LEAQ type.[]uint8(SB), AX
0x005d 00093 (append.go:9) MOVQ AX, (SP)
0x0061 00097 (append.go:9) MOVQ "".b+80(FP), BP
0x0066 00102 (append.go:9) MOVQ BP, 8(SP)
0x006b 00107 (append.go:9) MOVQ CX, 16(SP)
0x0070 00112 (append.go:9) MOVQ BX, 24(SP)
0x0075 00117 (append.go:9) MOVQ DX, 32(SP)
0x007a 00122 (append.go:9) PCDATA $0, $0
0x007a 00122 (append.go:9) CALL runtime.growslice(SB)
0x007f 00127 (append.go:9) MOVQ 40(SP), AX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:8) MOVQ "".b+88(FP), CX
0x008e 00142 (append.go:9) MOVQ "".autotmp_0+64(SP), DX
0x0093 00147 (append.go:9) JMP 52
0x0095 00149 (append.go:9) NOP
0x0095 00149 (append.go:8) CALL runtime.morestack_noctxt(SB)
0x009a 00154 (append.go:8) JMP 0
After:
"".s t=1 size=176 args=0x30 locals=0x40
0x0000 00000 (append.go:8) TEXT "".s(SB), $64-48
0x0000 00000 (append.go:8) MOVQ (TLS), CX
0x0009 00009 (append.go:8) CMPQ SP, 16(CX)
0x000d 00013 (append.go:8) JLS 151
0x0013 00019 (append.go:8) SUBQ $64, SP
0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB)
0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:9) MOVQ "".b+80(FP), CX
0x001c 00028 (append.go:9) LEAQ 3(CX), DX
0x0020 00032 (append.go:9) MOVQ "".b+88(FP), BX
0x0025 00037 (append.go:9) CMPQ DX, BX
0x0028 00040 (append.go:9) JGT $0, 81
0x002a 00042 (append.go:8) MOVQ "".b+72(FP), AX
0x002f 00047 (append.go:9) MOVB $1, (AX)(CX*1)
0x0033 00051 (append.go:9) MOVB $2, 1(AX)(CX*1)
0x0038 00056 (append.go:9) MOVB $3, 2(AX)(CX*1)
0x003d 00061 (append.go:10) MOVQ AX, "".~r1+96(FP)
0x0042 00066 (append.go:10) MOVQ DX, "".~r1+104(FP)
0x0047 00071 (append.go:10) MOVQ BX, "".~r1+112(FP)
0x004c 00076 (append.go:10) ADDQ $64, SP
0x0050 00080 (append.go:10) RET
0x0051 00081 (append.go:9) LEAQ type.[]uint8(SB), AX
0x0058 00088 (append.go:9) MOVQ AX, (SP)
0x005c 00092 (append.go:9) MOVQ "".b+72(FP), BP
0x0061 00097 (append.go:9) MOVQ BP, 8(SP)
0x0066 00102 (append.go:9) MOVQ CX, 16(SP)
0x006b 00107 (append.go:9) MOVQ BX, 24(SP)
0x0070 00112 (append.go:9) MOVQ DX, 32(SP)
0x0075 00117 (append.go:9) PCDATA $0, $0
0x0075 00117 (append.go:9) CALL runtime.growslice(SB)
0x007a 00122 (append.go:9) MOVQ 40(SP), AX
0x007f 00127 (append.go:9) MOVQ 48(SP), CX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:9) ADDQ $3, CX
0x008d 00141 (append.go:9) MOVQ CX, DX
0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX
0x0095 00149 (append.go:9) JMP 47
0x0097 00151 (append.go:9) NOP
0x0097 00151 (append.go:8) CALL runtime.morestack_noctxt(SB)
0x009c 00156 (append.go:8) JMP 0
Observe that in the following sequence,
we should use DX directly instead of using
CX as a temporary register, which would make
the new code a strict improvement on the old:
0x007f 00127 (append.go:9) MOVQ 48(SP), CX
0x0084 00132 (append.go:9) MOVQ 56(SP), BX
0x0089 00137 (append.go:9) ADDQ $3, CX
0x008d 00141 (append.go:9) MOVQ CX, DX
0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX
Change-Id: I4ee50b18fa53865901d2d7f86c2cbb54c6fa6924
Reviewed-on: https://go-review.googlesource.com/21812
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2016-04-10 09:08:00 -07:00
|
|
|
delete(s.vars, &newlenVar)
|
2016-04-04 10:58:21 -07:00
|
|
|
delete(s.vars, &capVar)
|
2016-04-10 09:44:17 -07:00
|
|
|
// make result
|
2016-04-04 10:58:21 -07:00
|
|
|
return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
|
2015-04-15 15:51:25 -07:00
|
|
|
}
|
|
|
|
|
2015-11-02 16:56:53 -08:00
|
|
|
// condBranch evaluates the boolean expression cond and branches to yes
|
|
|
|
// if cond is true and no if cond is false.
|
|
|
|
// This function is intended to handle && and || better than just calling
|
|
|
|
// s.expr(cond) and branching on the result.
|
|
|
|
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
|
|
|
|
if cond.Op == OANDAND {
|
|
|
|
mid := s.f.NewBlock(ssa.BlockPlain)
|
|
|
|
s.stmtList(cond.Ninit)
|
|
|
|
s.condBranch(cond.Left, mid, no, max8(likely, 0))
|
|
|
|
s.startBlock(mid)
|
|
|
|
s.condBranch(cond.Right, yes, no, likely)
|
|
|
|
return
|
|
|
|
// Note: if likely==1, then both recursive calls pass 1.
|
|
|
|
// If likely==-1, then we don't have enough information to decide
|
2016-03-01 23:21:55 +00:00
|
|
|
// whether the first branch is likely or not. So we pass 0 for
|
2015-11-02 16:56:53 -08:00
|
|
|
// the likeliness of the first branch.
|
|
|
|
// TODO: have the frontend give us branch prediction hints for
|
|
|
|
// OANDAND and OOROR nodes (if it ever has such info).
|
|
|
|
}
|
|
|
|
if cond.Op == OOROR {
|
|
|
|
mid := s.f.NewBlock(ssa.BlockPlain)
|
|
|
|
s.stmtList(cond.Ninit)
|
|
|
|
s.condBranch(cond.Left, yes, mid, min8(likely, 0))
|
|
|
|
s.startBlock(mid)
|
|
|
|
s.condBranch(cond.Right, yes, no, likely)
|
|
|
|
return
|
|
|
|
// Note: if likely==-1, then both recursive calls pass -1.
|
|
|
|
// If likely==1, then we don't have enough info to decide
|
|
|
|
// the likelihood of the first branch.
|
|
|
|
}
|
2015-11-03 09:30:17 -08:00
|
|
|
if cond.Op == ONOT {
|
|
|
|
s.stmtList(cond.Ninit)
|
|
|
|
s.condBranch(cond.Left, no, yes, -likely)
|
|
|
|
return
|
|
|
|
}
|
2015-11-02 16:56:53 -08:00
|
|
|
c := s.expr(cond)
|
|
|
|
b := s.endBlock()
|
|
|
|
b.Kind = ssa.BlockIf
|
2016-03-15 20:45:50 -07:00
|
|
|
b.SetControl(c)
|
2015-11-02 16:56:53 -08:00
|
|
|
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
|
|
|
|
b.AddEdgeTo(yes)
|
|
|
|
b.AddEdgeTo(no)
|
|
|
|
}
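// Illustrative sketch (not part of the compiler source): for
//
//	if a && b { body }
//
// condBranch emits roughly
//
//	cur: if a goto mid else goto no
//	mid: if b goto yes else goto no
//
// so b is evaluated only when a is true, preserving short-circuit semantics,
// with the likely hint propagated as described in the comments above.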
|
|
|
|
|
2016-03-21 10:22:03 -07:00
|
|
|
type skipMask uint8
|
|
|
|
|
|
|
|
const (
|
|
|
|
skipPtr skipMask = 1 << iota
|
|
|
|
skipLen
|
|
|
|
skipCap
|
|
|
|
)
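// Illustrative note (the combination below is hypothetical): callers of assign
// OR these flags together when a top-level word of the destination is already
// known to hold the right value, e.g. skipLen|skipCap when only the data
// pointer of a slice changes; the corresponding stores are then omitted.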
|
|
|
|
|
2016-01-25 17:06:54 -08:00
|
|
|
// assign does left = right.
|
|
|
|
// Right has already been evaluated to ssa, left has not.
|
|
|
|
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
|
|
|
|
// If deref is true and right == nil, just do left = 0.
|
2016-03-21 10:22:03 -07:00
|
|
|
// skip indicates assignments (at the top level) that can be avoided.
|
2017-02-10 10:15:10 -05:00
|
|
|
func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
|
2015-08-14 21:47:20 -07:00
|
|
|
if left.Op == ONAME && isblank(left) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
t := left.Type
|
|
|
|
dowidth(t)
|
2016-02-27 17:49:31 -08:00
|
|
|
if s.canSSA(left) {
|
2016-01-25 17:06:54 -08:00
|
|
|
if deref {
|
2016-09-09 21:08:46 -07:00
|
|
|
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
|
2016-01-25 17:06:54 -08:00
|
|
|
}
|
2016-01-11 21:05:33 -08:00
|
|
|
if left.Op == ODOT {
|
|
|
|
// We're assigning to a field of an ssa-able value.
|
|
|
|
// We need to build a new structure with the new value for the
|
|
|
|
// field we're assigning and the old values for the other fields.
|
|
|
|
// For instance:
|
|
|
|
// type T struct {a, b, c int}
|
|
|
|
// var x T
|
|
|
|
// x.b = 5
|
|
|
|
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
|
|
|
|
|
|
|
|
// Grab information about the structure type.
|
|
|
|
t := left.Left.Type
|
|
|
|
nf := t.NumFields()
|
|
|
|
idx := fieldIdx(left)
|
|
|
|
|
|
|
|
// Grab old value of structure.
|
|
|
|
old := s.expr(left.Left)
|
|
|
|
|
|
|
|
// Make new structure.
|
|
|
|
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
|
|
|
|
|
|
|
|
// Add fields as args.
|
2016-03-14 12:45:18 -07:00
|
|
|
for i := 0; i < nf; i++ {
|
2016-01-11 21:05:33 -08:00
|
|
|
if i == idx {
|
|
|
|
new.AddArg(right)
|
|
|
|
} else {
|
2016-03-14 12:45:18 -07:00
|
|
|
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
|
2016-01-11 21:05:33 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recursively assign the new value we've made to the base of the dot op.
|
2017-02-10 10:15:10 -05:00
|
|
|
s.assign(left.Left, new, false, 0)
|
2016-01-11 21:05:33 -08:00
|
|
|
// TODO: do we need to update named values here?
|
|
|
|
return
|
|
|
|
}
|
2016-10-30 21:10:03 -07:00
|
|
|
if left.Op == OINDEX && left.Left.Type.IsArray() {
|
|
|
|
// We're assigning to an element of an ssa-able array.
|
|
|
|
// a[i] = v
|
|
|
|
t := left.Left.Type
|
|
|
|
n := t.NumElem()
|
|
|
|
|
|
|
|
i := s.expr(left.Right) // index
|
|
|
|
if n == 0 {
|
|
|
|
// The bounds check must fail. Might as well
|
|
|
|
// ignore the actual index and just use zeros.
|
|
|
|
z := s.constInt(Types[TINT], 0)
|
|
|
|
s.boundsCheck(z, z)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if n != 1 {
|
|
|
|
s.Fatalf("assigning to non-1-length array")
|
|
|
|
}
|
|
|
|
// Rewrite to a = [1]{v}
|
|
|
|
i = s.extendIndex(i, panicindex)
|
|
|
|
s.boundsCheck(i, s.constInt(Types[TINT], 1))
|
|
|
|
v := s.newValue1(ssa.OpArrayMake1, t, right)
|
2017-02-10 10:15:10 -05:00
|
|
|
s.assign(left.Left, v, false, 0)
|
2016-10-30 21:10:03 -07:00
|
|
|
return
|
|
|
|
}
|
2015-06-12 14:23:29 +01:00
|
|
|
// Update variable assignment.
|
2015-08-29 14:54:45 -07:00
|
|
|
s.vars[left] = right
|
2015-10-22 14:22:38 -07:00
|
|
|
s.addNamedValue(left, right)
|
2015-06-12 14:23:29 +01:00
|
|
|
return
|
|
|
|
}
|
2016-03-01 23:21:55 +00:00
|
|
|
// Left is not ssa-able. Compute its address.
|
2017-02-02 19:47:59 -05:00
|
|
|
addr := s.addr(left, false)
|
2016-04-29 12:09:32 -07:00
|
|
|
if left.Op == ONAME && skip == 0 {
|
2015-09-17 16:45:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
|
2015-08-24 02:16:19 -07:00
|
|
|
}
|
2017-02-05 23:43:31 -05:00
|
|
|
if isReflectHeaderDataField(left) {
|
|
|
|
// Package unsafe's documentation says storing pointers into
|
|
|
|
// reflect.SliceHeader and reflect.StringHeader's Data fields
|
|
|
|
// is valid, even though they have type uintptr (#19168).
|
|
|
|
// Mark it pointer type to signal the writebarrier pass to
|
|
|
|
// insert a write barrier.
|
|
|
|
t = Types[TUNSAFEPTR]
|
|
|
|
}
|
2016-01-25 17:06:54 -08:00
|
|
|
if deref {
|
|
|
|
// Treat as a mem->mem move.
|
2017-02-10 10:15:10 -05:00
|
|
|
var store *ssa.Value
|
2016-10-18 11:06:28 -04:00
|
|
|
if right == nil {
|
2017-03-13 21:51:08 -04:00
|
|
|
store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
|
2017-02-10 10:15:10 -05:00
|
|
|
} else {
|
2017-03-13 21:51:08 -04:00
|
|
|
store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
|
2016-10-18 11:06:28 -04:00
|
|
|
}
|
2017-02-09 09:46:44 -05:00
|
|
|
store.Aux = t
|
|
|
|
s.vars[&memVar] = store
|
2016-01-25 17:06:54 -08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
// Treat as a store.
|
2017-02-10 10:15:10 -05:00
|
|
|
s.storeType(t, addr, right, skip)
|
2015-06-12 14:23:29 +01:00
|
|
|
}
|
|
|
|
|
2015-07-20 15:30:52 -07:00
|
|
|
// zeroVal returns the zero value for type t.
|
|
|
|
func (s *state) zeroVal(t *Type) *ssa.Value {
|
|
|
|
switch {
|
2015-07-28 14:19:20 -07:00
|
|
|
case t.IsInteger():
|
|
|
|
switch t.Size() {
|
|
|
|
case 1:
|
|
|
|
return s.constInt8(t, 0)
|
|
|
|
case 2:
|
|
|
|
return s.constInt16(t, 0)
|
|
|
|
case 4:
|
|
|
|
return s.constInt32(t, 0)
|
|
|
|
case 8:
|
|
|
|
return s.constInt64(t, 0)
|
|
|
|
default:
|
2016-09-09 21:08:46 -07:00
|
|
|
s.Fatalf("bad sized integer type %v", t)
|
2015-07-28 14:19:20 -07:00
|
|
|
}
|
2015-08-25 19:21:45 -05:00
|
|
|
case t.IsFloat():
|
|
|
|
switch t.Size() {
|
|
|
|
case 4:
|
|
|
|
return s.constFloat32(t, 0)
|
|
|
|
case 8:
|
|
|
|
return s.constFloat64(t, 0)
|
|
|
|
default:
|
2016-09-09 21:08:46 -07:00
|
|
|
s.Fatalf("bad sized float type %v", t)
|
2015-08-25 19:21:45 -05:00
|
|
|
}
|
2015-08-28 14:24:10 -04:00
|
|
|
case t.IsComplex():
|
|
|
|
switch t.Size() {
|
|
|
|
case 8:
|
|
|
|
z := s.constFloat32(Types[TFLOAT32], 0)
|
2015-08-28 13:52:26 -07:00
|
|
|
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
|
2015-08-28 14:24:10 -04:00
|
|
|
case 16:
|
|
|
|
z := s.constFloat64(Types[TFLOAT64], 0)
|
2015-08-28 13:52:26 -07:00
|
|
|
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
|
2015-08-28 14:24:10 -04:00
|
|
|
default:
|
2016-09-09 21:08:46 -07:00
|
|
|
s.Fatalf("bad sized complex type %v", t)
|
2015-08-28 14:24:10 -04:00
|
|
|
}
|
|
|
|
|
2015-07-20 15:30:52 -07:00
|
|
|
case t.IsString():
|
2016-03-06 18:06:09 -08:00
|
|
|
return s.constEmptyString(t)
|
2016-03-28 10:55:44 -07:00
|
|
|
case t.IsPtrShaped():
|
2016-03-06 18:06:09 -08:00
|
|
|
return s.constNil(t)
|
2015-07-20 15:30:52 -07:00
|
|
|
case t.IsBoolean():
|
2015-09-08 16:52:25 -07:00
|
|
|
return s.constBool(false)
|
2015-08-18 10:26:28 -07:00
|
|
|
case t.IsInterface():
|
2016-03-06 18:06:09 -08:00
|
|
|
return s.constInterface(t)
|
2015-08-18 10:26:28 -07:00
|
|
|
case t.IsSlice():
|
2016-03-06 18:06:09 -08:00
|
|
|
return s.constSlice(t)
|
2016-01-11 21:05:33 -08:00
|
|
|
case t.IsStruct():
|
|
|
|
n := t.NumFields()
|
|
|
|
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
|
2016-03-14 12:45:18 -07:00
|
|
|
for i := 0; i < n; i++ {
|
2016-01-11 21:05:33 -08:00
|
|
|
v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
|
|
|
|
}
|
|
|
|
return v
|
2016-10-30 21:10:03 -07:00
|
|
|
case t.IsArray():
|
|
|
|
switch t.NumElem() {
|
|
|
|
case 0:
|
|
|
|
return s.entryNewValue0(ssa.OpArrayMake0, t)
|
|
|
|
case 1:
|
|
|
|
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
|
|
|
|
}
|
2015-07-20 15:30:52 -07:00
|
|
|
}
|
2016-09-14 10:01:05 -07:00
|
|
|
s.Fatalf("zero for type %v not implemented", t)
|
2015-07-20 15:30:52 -07:00
|
|
|
return nil
|
|
|
|
}
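// Illustrative sketch (the type below is hypothetical): for
//
//	type T struct {
//		n int
//		f float64
//		s string
//		p *int
//	}
//
// zeroVal builds the SSA equivalent of T{0, 0, "", nil}, recursing into struct
// fields and into arrays of length 0 or 1.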
|
|
|
|
|
2015-09-09 23:56:59 -07:00
|
|
|
type callKind int8
|
|
|
|
|
|
|
|
const (
|
|
|
|
callNormal callKind = iota
|
|
|
|
callDefer
|
|
|
|
callGo
|
|
|
|
)
|
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
var intrinsics map[intrinsicKey]intrinsicBuilder
|
2016-08-29 20:28:20 -07:00
|
|
|
|
|
|
|
// An intrinsicBuilder converts a call node n into an ssa value that
|
2016-11-01 15:28:10 -07:00
|
|
|
// implements that call as an intrinsic. args is a list of arguments to the func.
|
|
|
|
type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
|
2016-08-29 20:28:20 -07:00
|
|
|
|
|
|
|
type intrinsicKey struct {
|
2017-03-14 13:25:12 -07:00
|
|
|
arch *sys.Arch
|
2016-08-29 20:28:20 -07:00
|
|
|
pkg string
|
|
|
|
fn string
|
|
|
|
}
|
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
func init() {
|
|
|
|
intrinsics = map[intrinsicKey]intrinsicBuilder{}
|
|
|
|
|
|
|
|
var all []*sys.Arch
|
|
|
|
var i4 []*sys.Arch
|
|
|
|
var i8 []*sys.Arch
|
|
|
|
var p4 []*sys.Arch
|
|
|
|
var p8 []*sys.Arch
|
|
|
|
for _, a := range sys.Archs {
|
|
|
|
all = append(all, a)
|
|
|
|
if a.IntSize == 4 {
|
|
|
|
i4 = append(i4, a)
|
|
|
|
} else {
|
|
|
|
i8 = append(i8, a)
|
|
|
|
}
|
|
|
|
if a.PtrSize == 4 {
|
|
|
|
p4 = append(p4, a)
|
|
|
|
} else {
|
|
|
|
p8 = append(p8, a)
|
|
|
|
}
|
2016-09-10 22:44:00 +02:00
|
|
|
}
|
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
// add adds the intrinsic b for pkg.fn for the given list of architectures.
|
|
|
|
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
|
|
|
|
for _, a := range archs {
|
|
|
|
intrinsics[intrinsicKey{a, pkg, fn}] = b
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// addF does the same as add but operates on architecture families.
|
|
|
|
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
|
|
|
|
m := 0
|
|
|
|
for _, f := range archFamilies {
|
|
|
|
if f >= 32 {
|
|
|
|
panic("too many architecture families")
|
|
|
|
}
|
|
|
|
m |= 1 << uint(f)
|
|
|
|
}
|
|
|
|
for _, a := range all {
|
|
|
|
if m>>uint(a.Family)&1 != 0 {
|
|
|
|
intrinsics[intrinsicKey{a, pkg, fn}] = b
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
|
|
|
|
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
|
|
|
|
for _, a := range archs {
|
|
|
|
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
|
|
|
|
intrinsics[intrinsicKey{a, pkg, fn}] = b
|
|
|
|
}
|
|
|
|
}
|
2016-08-29 16:26:57 -04:00
|
|
|
}
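// Illustrative (hypothetical) use of the helpers above:
//
//	addF("example/pkg", "Fn", builder, sys.AMD64, sys.ARM64) // by architecture family
//	alias("example/pkg", "Fn2", "example/pkg", "Fn", all...) // reuse Fn's builder
//
// The real registrations follow.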
|
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
/******** runtime ********/
|
|
|
|
if !instrumenting {
|
|
|
|
add("runtime", "slicebytetostringtmp",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
|
|
|
|
// for the backend instead of slicebytetostringtmp calls
|
|
|
|
// when not instrumenting.
|
|
|
|
slice := args[0]
|
2017-03-18 10:16:03 -07:00
|
|
|
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
|
2017-03-14 13:25:12 -07:00
|
|
|
len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
|
|
|
|
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
|
|
|
|
},
|
|
|
|
all...)
|
|
|
|
}
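// In effect the builder above turns slicebytetostringtmp(b) into a
// string header that aliases b's backing array, roughly
//	StringMake(SlicePtr(b), SliceLen(b))
// with no copy; the "tmp" in the name reflects that the result must not
// outlive the next modification of b.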
|
|
|
|
add("runtime", "KeepAlive",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2017-03-18 10:16:03 -07:00
|
|
|
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
|
2016-08-31 15:17:02 -07:00
|
|
|
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem())
|
|
|
|
return nil
|
|
|
|
},
|
2017-03-14 13:25:12 -07:00
|
|
|
all...)
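// runtime.KeepAlive produces no result value; the builder only threads
// an OpKeepAlive through the memory state so the object referenced by
// the interface's data word stays live up to this point.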
|
2016-09-10 22:44:00 +02:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
/******** runtime/internal/sys ********/
|
|
|
|
addF("runtime/internal/sys", "Ctz32",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpCtz32, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("runtime/internal/sys", "Ctz64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpCtz64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("runtime/internal/sys", "Bswap32",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0])
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
|
|
|
|
addF("runtime/internal/sys", "Bswap64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0])
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
/******** runtime/internal/atomic ********/
|
|
|
|
addF("runtime/internal/atomic", "Load",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
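// The atomic builders all follow the same tuple pattern, sketched here
// for Load: the op yields a (value, memory) pair, Select1 becomes the
// new memory state, and Select0 is the returned value.
//	v    = AtomicLoad32(ptr, mem)
//	mem' = Select1(v)
//	ret  = Select0(v)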
|
|
|
|
|
|
|
|
addF("runtime/internal/atomic", "Load64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Loadp",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2017-03-18 10:16:03 -07:00
|
|
|
v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(s.f.Config.Types.BytePtr, ssa.TypeMem), args[0], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
2017-03-18 10:16:03 -07:00
|
|
|
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
addF("runtime/internal/atomic", "Store",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Store64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "StorepNoWB",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
addF("runtime/internal/atomic", "Xchg",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Xchg64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
addF("runtime/internal/atomic", "Xadd",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Xadd64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
addF("runtime/internal/atomic", "Cas",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Cas64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
|
|
|
|
return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
addF("runtime/internal/atomic", "And8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
|
|
|
|
addF("runtime/internal/atomic", "Or8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem())
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
|
|
|
|
|
|
|
|
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
|
|
|
|
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
|
|
|
|
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", i4...)
|
|
|
|
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", i8...)
|
|
|
|
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
|
|
|
|
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
|
|
|
|
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
|
|
|
|
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
|
|
|
|
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
|
|
|
|
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
|
|
|
|
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
|
|
|
|
|
|
|
|
/******** math ********/
|
|
|
|
addF("math", "Sqrt",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-02 17:20:22 +01:00
|
|
|
return s.newValue1(ssa.OpSqrt, Types[TFLOAT64], args[0])
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
2017-03-14 13:25:12 -07:00
|
|
|
/******** math/bits ********/
|
|
|
|
addF("math/bits", "TrailingZeros64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpCtz64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("math/bits", "TrailingZeros32",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpCtz32, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("math/bits", "TrailingZeros16",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt16to32, Types[TUINT32], args[0])
|
|
|
|
c := s.constInt32(Types[TUINT32], 1<<16)
|
|
|
|
y := s.newValue2(ssa.OpOr32, Types[TUINT32], x, c)
|
|
|
|
return s.newValue1(ssa.OpCtz32, Types[TINT], y)
|
|
|
|
},
|
|
|
|
sys.ARM, sys.MIPS)
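// The OR with 1<<16 guarantees Ctz32 sees a nonzero input and makes
// TrailingZeros16(0) evaluate to 16; roughly
//	TrailingZeros16(x) = Ctz32(uint32(x) | 1<<16)
// The 64-bit variant below and the TrailingZeros8 builders play the
// same trick with 1<<16 and 1<<8.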
|
|
|
|
addF("math/bits", "TrailingZeros16",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt16to64, Types[TUINT64], args[0])
|
|
|
|
c := s.constInt64(Types[TUINT64], 1<<16)
|
|
|
|
y := s.newValue2(ssa.OpOr64, Types[TUINT64], x, c)
|
|
|
|
return s.newValue1(ssa.OpCtz64, Types[TINT], y)
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X)
|
|
|
|
addF("math/bits", "TrailingZeros8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt8to32, Types[TUINT32], args[0])
|
|
|
|
c := s.constInt32(Types[TUINT32], 1<<8)
|
|
|
|
y := s.newValue2(ssa.OpOr32, Types[TUINT32], x, c)
|
|
|
|
return s.newValue1(ssa.OpCtz32, Types[TINT], y)
|
|
|
|
},
|
|
|
|
sys.ARM, sys.MIPS)
|
|
|
|
addF("math/bits", "TrailingZeros8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt8to64, Types[TUINT64], args[0])
|
|
|
|
c := s.constInt64(Types[TUINT64], 1<<8)
|
|
|
|
y := s.newValue2(ssa.OpOr64, Types[TUINT64], x, c)
|
|
|
|
return s.newValue1(ssa.OpCtz64, Types[TINT], y)
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.S390X)
|
2017-03-15 21:28:29 -07:00
|
|
|
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
|
|
|
|
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
|
|
|
|
// ReverseBytes inlines correctly, no need to intrinsify it.
|
|
|
|
// ReverseBytes16 lowers to a rotate, no need for anything special here.
|
2017-03-16 14:08:31 -07:00
|
|
|
addF("math/bits", "Len64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpBitLen64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("math/bits", "Len32",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
if s.config.IntSize == 4 {
|
|
|
|
return s.newValue1(ssa.OpBitLen32, Types[TINT], args[0])
|
|
|
|
}
|
|
|
|
x := s.newValue1(ssa.OpZeroExt32to64, Types[TUINT64], args[0])
|
|
|
|
return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
addF("math/bits", "Len16",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
if s.config.IntSize == 4 {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt16to32, Types[TUINT32], args[0])
|
|
|
|
return s.newValue1(ssa.OpBitLen32, Types[TINT], x)
|
|
|
|
}
|
|
|
|
x := s.newValue1(ssa.OpZeroExt16to64, Types[TUINT64], args[0])
|
|
|
|
return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
// Note: disabled on AMD64 because the Go code is faster!
|
|
|
|
addF("math/bits", "Len8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
if s.config.IntSize == 4 {
|
|
|
|
x := s.newValue1(ssa.OpZeroExt8to32, Types[TUINT32], args[0])
|
|
|
|
return s.newValue1(ssa.OpBitLen32, Types[TINT], x)
|
|
|
|
}
|
|
|
|
x := s.newValue1(ssa.OpZeroExt8to64, Types[TUINT64], args[0])
|
|
|
|
return s.newValue1(ssa.OpBitLen64, Types[TINT], x)
|
|
|
|
},
|
|
|
|
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
|
|
|
|
addF("math/bits", "Len",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
if s.config.IntSize == 4 {
|
|
|
|
return s.newValue1(ssa.OpBitLen32, Types[TINT], args[0])
|
|
|
|
}
|
|
|
|
return s.newValue1(ssa.OpBitLen64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
|
|
|
|
// LeadingZeros is handled because it trivially calls Len.
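// (In math/bits, LeadingZeros64(x) is simply 64 - Len64(x), and likewise
// for the narrower widths, so intrinsifying Len is enough.)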
|
2017-03-16 22:34:38 -07:00
|
|
|
addF("math/bits", "Reverse64",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpBitRev64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.ARM64)
|
|
|
|
addF("math/bits", "Reverse32",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpBitRev32, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.ARM64)
|
|
|
|
addF("math/bits", "Reverse16",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpBitRev16, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.ARM64)
|
|
|
|
addF("math/bits", "Reverse8",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
return s.newValue1(ssa.OpBitRev8, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.ARM64)
|
|
|
|
addF("math/bits", "Reverse",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
|
|
|
if s.config.IntSize == 4 {
|
|
|
|
return s.newValue1(ssa.OpBitRev32, Types[TINT], args[0])
|
|
|
|
}
|
|
|
|
return s.newValue1(ssa.OpBitRev64, Types[TINT], args[0])
|
|
|
|
},
|
|
|
|
sys.ARM64)
|
2016-08-29 20:28:20 -07:00
|
|
|
|
|
|
|
/******** sync/atomic ********/
|
2017-03-14 13:25:12 -07:00
|
|
|
|
|
|
|
// Note: these are disabled by flag_race in findIntrinsic below.
|
|
|
|
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
|
|
|
|
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
|
|
|
|
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
|
|
|
|
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
|
|
|
|
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
|
|
|
|
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
|
|
|
|
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
|
|
|
|
|
|
|
|
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
|
|
|
|
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
|
2016-08-29 20:28:20 -07:00
|
|
|
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
|
2017-03-14 13:25:12 -07:00
|
|
|
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
|
|
|
|
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
|
|
|
|
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
|
|
|
|
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
|
|
|
|
|
|
|
|
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
|
|
|
|
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
|
|
|
|
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
|
|
|
|
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
|
|
|
|
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
|
|
|
|
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
|
|
|
|
|
|
|
|
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
|
|
|
|
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
|
|
|
|
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
|
|
|
|
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
|
|
|
|
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
|
|
|
|
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
|
|
|
|
|
|
|
|
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
|
|
|
|
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
|
|
|
|
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
|
|
|
|
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
|
|
|
|
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
|
|
|
|
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
|
2016-10-06 15:43:47 -04:00
|
|
|
|
|
|
|
/******** math/big ********/
|
2017-03-14 13:25:12 -07:00
|
|
|
add("math/big", "mulWW",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1])
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.ArchAMD64)
|
|
|
|
add("math/big", "divWW",
|
|
|
|
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2])
|
2017-03-14 13:25:12 -07:00
|
|
|
},
|
|
|
|
sys.ArchAMD64)
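// mulWW and divWW map onto the 64x64->128 multiply and 128/64 divide
// ops, registered only for amd64 where they lower to single
// instructions; elsewhere the ordinary math/big calls remain.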
|
2016-08-29 20:28:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// findIntrinsic returns a function which builds the SSA equivalent of the
|
|
|
|
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
|
|
|
|
func findIntrinsic(sym *Sym) intrinsicBuilder {
|
2016-09-14 10:01:05 -07:00
|
|
|
if ssa.IntrinsicsDisable {
|
2016-08-29 20:28:20 -07:00
|
|
|
return nil
|
2016-03-11 00:10:52 -05:00
|
|
|
}
|
2016-08-29 20:28:20 -07:00
|
|
|
if sym == nil || sym.Pkg == nil {
|
|
|
|
return nil
|
2016-03-11 00:10:52 -05:00
|
|
|
}
|
2016-08-29 20:28:20 -07:00
|
|
|
pkg := sym.Pkg.Path
|
2016-10-06 15:43:47 -04:00
|
|
|
if sym.Pkg == localpkg {
|
|
|
|
pkg = myimportpath
|
|
|
|
}
|
2017-03-14 13:25:12 -07:00
|
|
|
if flag_race && pkg == "sync/atomic" {
|
|
|
|
// The race detector needs to be able to intercept these calls.
|
|
|
|
// We can't intrinsify them.
|
|
|
|
return nil
|
2016-08-23 16:49:28 -07:00
|
|
|
}
|
2017-03-14 13:25:12 -07:00
|
|
|
fn := sym.Name
|
2017-03-17 13:35:36 -07:00
|
|
|
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
|
2016-03-11 00:10:52 -05:00
|
|
|
}
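// For example, compiling a call to runtime/internal/sys.Ctz64 for amd64
// looks up roughly intrinsics[intrinsicKey{<amd64 arch>, "runtime/internal/sys", "Ctz64"}]
// and finds the Ctz64 builder registered in init above.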
|
|
|
|
|
2016-08-23 16:49:28 -07:00
|
|
|
func isIntrinsicCall(n *Node) bool {
|
2016-03-11 00:10:52 -05:00
|
|
|
if n == nil || n.Left == nil {
|
|
|
|
return false
|
|
|
|
}
|
2016-08-29 20:28:20 -07:00
|
|
|
return findIntrinsic(n.Left.Sym) != nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
|
|
|
|
func (s *state) intrinsicCall(n *Node) *ssa.Value {
|
2016-11-01 15:28:10 -07:00
|
|
|
v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
|
2016-08-29 20:28:20 -07:00
|
|
|
if ssa.IntrinsicsDebug > 0 {
|
|
|
|
x := v
|
|
|
|
if x == nil {
|
|
|
|
x = s.mem()
|
|
|
|
}
|
|
|
|
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
|
|
|
|
x = x.Args[0]
|
|
|
|
}
|
2016-12-07 17:40:46 -08:00
|
|
|
Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
|
2016-08-29 20:28:20 -07:00
|
|
|
}
|
|
|
|
return v
|
2016-03-11 00:10:52 -05:00
|
|
|
}
|
|
|
|
|
2016-11-01 15:28:10 -07:00
|
|
|
type callArg struct {
|
|
|
|
offset int64
|
|
|
|
v *ssa.Value
|
|
|
|
}
|
|
|
|
type byOffset []callArg
|
|
|
|
|
|
|
|
func (x byOffset) Len() int { return len(x) }
|
|
|
|
func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
|
|
func (x byOffset) Less(i, j int) bool {
|
|
|
|
return x[i].offset < x[j].offset
|
|
|
|
}
|
|
|
|
|
|
|
|
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
|
|
|
|
func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
|
|
|
|
// This code is complicated because of how walk transforms calls. For a call node,
|
|
|
|
// each entry in n.List is either an assignment to OINDREGSP which actually
|
|
|
|
// stores an arg, or an assignment to a temporary which computes an arg
|
|
|
|
// which is later assigned.
|
|
|
|
// The args can also be out of order.
|
|
|
|
// TODO: when walk goes away someday, this code can go away also.
|
|
|
|
var args []callArg
|
|
|
|
temps := map[*Node]*ssa.Value{}
|
|
|
|
for _, a := range n.List.Slice() {
|
|
|
|
if a.Op != OAS {
|
|
|
|
s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
|
|
|
|
}
|
|
|
|
l, r := a.Left, a.Right
|
|
|
|
switch l.Op {
|
|
|
|
case ONAME:
|
|
|
|
// Evaluate and store to "temporary".
|
|
|
|
// Walk ensures these temporaries are dead outside of n.
|
|
|
|
temps[l] = s.expr(r)
|
|
|
|
case OINDREGSP:
|
|
|
|
// Store a value to an argument slot.
|
|
|
|
var v *ssa.Value
|
|
|
|
if x, ok := temps[r]; ok {
|
|
|
|
// This is a previously computed temporary.
|
|
|
|
v = x
|
|
|
|
} else {
|
|
|
|
// This is an explicit value; evaluate it.
|
|
|
|
v = s.expr(r)
|
|
|
|
}
|
|
|
|
args = append(args, callArg{l.Xoffset, v})
|
|
|
|
default:
|
|
|
|
s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
|
|
|
|
}
|
2016-03-11 00:10:52 -05:00
|
|
|
}
|
2016-11-01 15:28:10 -07:00
|
|
|
sort.Sort(byOffset(args))
|
|
|
|
res := make([]*ssa.Value, len(args))
|
|
|
|
for i, a := range args {
|
|
|
|
res[i] = a.v
|
|
|
|
}
|
|
|
|
return res
|
2016-08-23 16:49:28 -07:00
|
|
|
}
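// As a rough sketch of what intrinsicArgs sees for a call f(x, y+1),
// walk emits something like
//	autotmp = y + 1      // OAS to an ONAME temporary
//	argslot0 = x         // OAS to an OINDREGSP slot
//	argslot1 = autotmp   // OAS to an OINDREGSP slot
// (names and order are illustrative); the loop above evaluates the
// temporaries, records the OINDREGSP stores with their offsets, and
// sorts them back into argument order.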
|
2016-03-11 00:10:52 -05:00
|
|
|
|
2016-01-25 17:06:54 -08:00
|
|
|
// call calls the function n using the specified call type.
|
|
|
|
// Returns the address of the return value (or nil if none).
|
2015-09-09 23:56:59 -07:00
|
|
|
func (s *state) call(n *Node, k callKind) *ssa.Value {
|
|
|
|
var sym *Sym // target symbol (if static)
|
|
|
|
var closure *ssa.Value // ptr to closure to run (if dynamic)
|
|
|
|
var codeptr *ssa.Value // ptr to target code (if dynamic)
|
|
|
|
var rcvr *ssa.Value // receiver to set
|
|
|
|
fn := n.Left
|
|
|
|
switch n.Op {
|
|
|
|
case OCALLFUNC:
|
|
|
|
if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
|
|
|
|
sym = fn.Sym
|
|
|
|
break
|
|
|
|
}
|
|
|
|
closure = s.expr(fn)
|
|
|
|
case OCALLMETH:
|
|
|
|
if fn.Op != ODOTMETH {
|
|
|
|
Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
|
|
|
|
}
|
|
|
|
if k == callNormal {
|
cmd/compile: change ODOT and friends to use Sym, not Right
The Node type ODOT and its variants all represent a selector, with a
simple name to the right of the dot. Before this change this was
represented by using an ONAME Node in the Right field. This ONAME node
served no useful purpose. This CL changes these Node types to store the
symbol in the Sym field instead, thus not requiring allocating a Node
for each selector.
When compiling x/tools/go/types this CL eliminates nearly 5000 calls to
newname and reduces the total number of Nodes allocated by about 6.6%.
It seems to cut compilation time by 1 to 2 percent.
Getting this right was somewhat subtle, and I added two dubious changes
to produce the exact same output as before. One is to ishairy in
inl.go: the ONAME node increased the cost of ODOT and friends by 1, and
I retained that, although really ODOT is not more expensive than any
other node. The other is to varexpr in walk.go: because the ONAME in
the Right field of an ODOT has no class, varexpr would always return
false for an ODOT, although in fact for some ODOT's it seemingly ought
to return true; I added an && false for now. I will send separate CLs,
that will break toolstash -cmp, to clean these up.
This CL passes toolstash -cmp.
Change-Id: I4af8a10cc59078c436130ce472f25abc3a9b2f80
Reviewed-on: https://go-review.googlesource.com/20890
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2016-03-18 16:52:30 -07:00
|
|
|
sym = fn.Sym
|
2015-09-09 23:56:59 -07:00
|
|
|
break
|
|
|
|
}
|
2016-09-20 16:34:30 -07:00
|
|
|
// Make a name n2 for the function.
|
|
|
|
// fn.Sym might be sync.(*Mutex).Unlock.
|
|
|
|
// Make a PFUNC node out of that, then evaluate it.
|
|
|
|
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
|
|
|
|
// We can then pass that to defer or go.
|
2016-03-18 16:52:30 -07:00
|
|
|
n2 := newname(fn.Sym)
|
2015-09-09 23:56:59 -07:00
|
|
|
n2.Class = PFUNC
|
2016-12-07 17:40:46 -08:00
|
|
|
n2.Pos = fn.Pos
|
2016-09-20 16:34:30 -07:00
|
|
|
n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
|
2016-03-18 16:52:30 -07:00
|
|
|
closure = s.expr(n2)
|
2015-09-09 23:56:59 -07:00
|
|
|
// Note: receiver is already assigned in n.List, so we don't
|
|
|
|
// want to set it here.
|
|
|
|
case OCALLINTER:
|
|
|
|
if fn.Op != ODOTINTER {
|
2016-04-27 15:10:10 +10:00
|
|
|
Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
|
2015-09-09 23:56:59 -07:00
|
|
|
}
|
|
|
|
i := s.expr(fn.Left)
|
|
|
|
itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
|
2016-06-06 16:00:33 -04:00
|
|
|
if k != callNormal {
|
|
|
|
s.nilCheck(itab)
|
|
|
|
}
|
2015-09-09 23:56:59 -07:00
|
|
|
itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
|
2017-03-18 10:16:03 -07:00
|
|
|
itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
|
2015-09-09 23:56:59 -07:00
|
|
|
if k == callNormal {
|
|
|
|
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
|
|
|
|
} else {
|
|
|
|
closure = itab
|
|
|
|
}
|
|
|
|
rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
|
|
|
|
}
|
|
|
|
dowidth(fn.Type)
|
2016-03-28 14:31:57 -07:00
|
|
|
stksize := fn.Type.ArgWidth() // includes receiver
|
2015-09-09 23:56:59 -07:00
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// Run all argument assignments. The arg slots have already
|
2015-09-09 23:56:59 -07:00
|
|
|
// been offset by the appropriate amount (+2*widthptr for go/defer,
|
|
|
|
// +widthptr for interface calls).
|
|
|
|
// For OCALLMETH, the receiver is set in these statements.
|
|
|
|
s.stmtList(n.List)
|
|
|
|
|
|
|
|
// Set receiver (for interface calls)
|
|
|
|
if rcvr != nil {
|
2015-10-19 13:56:55 -07:00
|
|
|
argStart := Ctxt.FixedFrameSize()
|
2015-09-09 23:56:59 -07:00
|
|
|
if k != callNormal {
|
|
|
|
argStart += int64(2 * Widthptr)
|
|
|
|
}
|
2017-03-18 10:16:03 -07:00
|
|
|
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
|
2017-03-13 21:51:08 -04:00
|
|
|
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, rcvr, s.mem())
|
2015-09-09 23:56:59 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Defer/go args
|
|
|
|
if k != callNormal {
|
|
|
|
// Write argsize and closure (args to Newproc/Deferproc).
|
2016-05-15 00:12:56 -04:00
|
|
|
argStart := Ctxt.FixedFrameSize()
|
2015-09-09 23:56:59 -07:00
|
|
|
argsize := s.constInt32(Types[TUINT32], int32(stksize))
|
2017-03-18 10:16:03 -07:00
|
|
|
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
|
2017-03-13 21:51:08 -04:00
|
|
|
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINT32], addr, argsize, s.mem())
|
2017-03-18 10:16:03 -07:00
|
|
|
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
|
2017-03-13 21:51:08 -04:00
|
|
|
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], addr, closure, s.mem())
|
2015-09-09 23:56:59 -07:00
|
|
|
stksize += 2 * int64(Widthptr)
|
|
|
|
}
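// For defer/go the argument area therefore begins with two extra words,
// laid out as Deferproc/Newproc expect:
//	argStart+0:            argsize (uint32)
//	argStart+Widthptr:     closure pointer
//	argStart+2*Widthptr:   the call's own arguments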
|
|
|
|
|
|
|
|
// call target
|
|
|
|
var call *ssa.Value
|
|
|
|
switch {
|
|
|
|
case k == callDefer:
|
2017-03-10 17:42:02 -08:00
|
|
|
call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Deferproc, s.mem())
|
2015-09-09 23:56:59 -07:00
|
|
|
case k == callGo:
|
2017-03-10 17:42:02 -08:00
|
|
|
call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Newproc, s.mem())
|
2015-09-09 23:56:59 -07:00
|
|
|
case closure != nil:
|
|
|
|
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
|
|
|
|
call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
|
|
|
|
case codeptr != nil:
|
|
|
|
call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
|
|
|
|
case sym != nil:
|
2017-02-06 13:30:40 -08:00
|
|
|
call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Linksym(sym), s.mem())
|
2015-09-09 23:56:59 -07:00
|
|
|
default:
|
2016-09-09 21:08:46 -07:00
|
|
|
Fatalf("bad call type %v %v", n.Op, n)
|
2015-09-09 23:56:59 -07:00
|
|
|
}
|
|
|
|
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
|
2015-09-17 16:45:10 -07:00
|
|
|
s.vars[&memVar] = call
|
2016-09-09 13:11:07 -07:00
|
|
|
|
|
|
|
// Finish block for defers
|
2016-03-09 19:27:57 -08:00
|
|
|
if k == callDefer {
|
2016-09-09 13:11:07 -07:00
|
|
|
b := s.endBlock()
|
2016-03-09 19:27:57 -08:00
|
|
|
b.Kind = ssa.BlockDefer
|
2016-09-09 13:11:07 -07:00
|
|
|
b.SetControl(call)
|
|
|
|
bNext := s.f.NewBlock(ssa.BlockPlain)
|
|
|
|
b.AddEdgeTo(bNext)
|
|
|
|
// Add recover edge to exit code.
|
2016-03-09 19:27:57 -08:00
|
|
|
r := s.f.NewBlock(ssa.BlockPlain)
|
|
|
|
s.startBlock(r)
|
|
|
|
s.exit()
|
|
|
|
b.AddEdgeTo(r)
|
|
|
|
b.Likely = ssa.BranchLikely
|
2016-09-09 13:11:07 -07:00
|
|
|
s.startBlock(bNext)
|
2016-03-09 19:27:57 -08:00
|
|
|
}
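// A defer call ends its block with BlockDefer, which has two successors:
// bNext for the normal return from deferproc, and r, which runs the
// function's exit code, for the path taken when a deferred call recovers
// a panic.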
|
2015-09-09 23:56:59 -07:00
|
|
|
|
2016-03-15 11:06:03 -07:00
|
|
|
res := n.Left.Type.Results()
|
|
|
|
if res.NumFields() == 0 || k != callNormal {
|
2015-09-09 23:56:59 -07:00
|
|
|
// call has no return value. Continue with the next statement.
|
|
|
|
return nil
|
|
|
|
}
|
2016-03-15 11:06:03 -07:00
|
|
|
fp := res.Field(0)
|
2017-03-19 09:51:22 +01:00
|
|
|
return s.constOffPtrSP(typPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
|
2015-09-09 23:56:59 -07:00
|
|
|
}
|
|
|
|
|
2015-07-28 14:31:25 -07:00
|
|
|
// etypesign returns the signed-ness of e, for integer/pointer etypes.
|
|
|
|
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
|
2015-11-16 13:20:16 -08:00
|
|
|
func etypesign(e EType) int8 {
|
2015-07-28 14:31:25 -07:00
|
|
|
switch e {
|
|
|
|
case TINT8, TINT16, TINT32, TINT64, TINT:
|
|
|
|
return -1
|
|
|
|
case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
|
|
|
|
return +1
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-10-15 20:25:32 -05:00
|
|
|
// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
|
|
|
|
// This improves the effectiveness of cse by using the same Aux values for the
|
|
|
|
// same symbols.
|
|
|
|
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
|
|
|
|
switch sym.(type) {
|
|
|
|
default:
|
2016-08-30 19:11:19 -07:00
|
|
|
s.Fatalf("sym %v is of unknown type %T", sym, sym)
|
2015-10-15 20:25:32 -05:00
|
|
|
case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
|
|
|
|
// these are the only valid types
|
|
|
|
}
|
|
|
|
|
|
|
|
if lsym, ok := s.varsyms[n]; ok {
|
|
|
|
return lsym
|
|
|
|
}
|
2016-08-30 19:11:19 -07:00
|
|
|
s.varsyms[n] = sym
|
|
|
|
return sym
|
2015-10-15 20:25:32 -05:00
|
|
|
}
|
|
|
|
|
2015-06-02 09:16:22 -07:00
|
|
|
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
|
2015-07-13 15:55:37 -07:00
|
|
|
// The value that the returned Value represents is guaranteed to be non-nil.
|
2015-10-09 16:48:30 -04:00
|
|
|
// If bounded is true then this address does not require a nil check for its operand
|
|
|
|
// even if that would otherwise be implied.
|
2017-02-02 19:47:59 -05:00
|
|
|
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
|
2017-03-19 09:51:22 +01:00
|
|
|
t := typPtr(n.Type)
|
2015-05-18 16:44:20 -07:00
|
|
|
switch n.Op {
|
|
|
|
case ONAME:
|
2015-06-10 15:03:06 -07:00
|
|
|
switch n.Class {
|
|
|
|
case PEXTERN:
|
2015-05-18 16:44:20 -07:00
|
|
|
// global variable
|
2017-02-06 18:18:49 -08:00
|
|
|
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: Linksym(n.Sym)})
|
2015-10-22 14:22:38 -07:00
|
|
|
v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
|
2015-07-28 11:08:44 -07:00
|
|
|
// TODO: Make OpAddr use AuxInt as well as Aux.
|
|
|
|
if n.Xoffset != 0 {
|
|
|
|
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
|
|
|
|
}
|
2017-02-02 19:47:59 -05:00
|
|
|
return v
|
2015-09-11 16:40:05 -04:00
|
|
|
case PPARAM:
|
|
|
|
// parameter slot
|
2015-06-29 11:56:28 -07:00
|
|
|
v := s.decladdrs[n]
|
2015-11-16 13:20:16 -08:00
|
|
|
if v != nil {
|
2017-02-02 19:47:59 -05:00
|
|
|
return v
|
2015-06-29 11:56:28 -07:00
|
|
|
}
|
2016-09-30 10:12:32 -07:00
|
|
|
if n == nodfp {
|
|
|
|
// Special arg that points to the frame pointer (Used by ORECOVER).
|
2015-11-16 13:20:16 -08:00
|
|
|
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
|
2015-11-16 13:20:16 -08:00
|
|
|
}
|
|
|
|
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
|
2017-02-02 19:47:59 -05:00
|
|
|
return nil
|
2015-08-24 02:16:19 -07:00
|
|
|
case PAUTO:
|
2016-03-11 20:03:17 -06:00
|
|
|
aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
|
2015-09-11 16:40:05 -04:00
|
|
|
case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
|
2015-10-15 20:25:32 -05:00
|
|
|
// ensure that we reuse symbols for out parameters so
|
|
|
|
// that cse works on their addresses
|
|
|
|
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
|
2015-06-10 15:03:06 -07:00
|
|
|
default:
|
2016-09-14 10:01:05 -07:00
|
|
|
s.Fatalf("variable address class %v not implemented", classnames[n.Class])
|
2017-02-02 19:47:59 -05:00
|
|
|
return nil
|
2015-05-18 16:44:20 -07:00
|
|
|
}
|
2016-10-24 14:33:22 -07:00
|
|
|
case OINDREGSP:
|
|
|
|
// indirect off REGSP
|
2015-05-18 16:44:20 -07:00
|
|
|
// used for storing/loading arguments/returns to/from callees
|
2017-03-08 12:50:00 -08:00
|
|
|
return s.constOffPtrSP(t, n.Xoffset)
|
2015-05-18 16:44:20 -07:00
|
|
|
case OINDEX:
|
2015-07-10 10:47:28 -06:00
|
|
|
if n.Left.Type.IsSlice() {
|
2015-05-18 16:44:20 -07:00
|
|
|
a := s.expr(n.Left)
|
|
|
|
i := s.expr(n.Right)
|
2016-09-16 00:33:29 +10:00
|
|
|
i = s.extendIndex(i, panicindex)
|
2015-10-22 14:22:38 -07:00
|
|
|
len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
|
2017-02-27 19:56:38 +02:00
|
|
|
if !n.Bounded() {
|
2015-08-18 14:17:30 -07:00
|
|
|
s.boundsCheck(i, len)
|
|
|
|
}
|
2015-10-22 14:22:38 -07:00
|
|
|
p := s.newValue1(ssa.OpSlicePtr, t, a)
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.newValue2(ssa.OpPtrIndex, t, p, i)
|
2015-07-10 10:47:28 -06:00
|
|
|
} else { // array
|
2017-02-02 19:47:59 -05:00
|
|
|
a := s.addr(n.Left, bounded)
|
2015-07-10 10:47:28 -06:00
|
|
|
i := s.expr(n.Right)
|
2016-09-16 00:33:29 +10:00
|
|
|
i = s.extendIndex(i, panicindex)
|
2016-03-31 14:46:04 -07:00
|
|
|
len := s.constInt(Types[TINT], n.Left.Type.NumElem())
|
2017-02-27 19:56:38 +02:00
|
|
|
if !n.Bounded() {
|
2015-08-18 14:17:30 -07:00
|
|
|
s.boundsCheck(i, len)
|
|
|
|
}
|
2017-03-19 09:51:22 +01:00
|
|
|
return s.newValue2(ssa.OpPtrIndex, typPtr(n.Left.Type.Elem()), a, i)
|
2015-05-18 16:44:20 -07:00
|
|
|
}
|
2015-07-13 21:22:16 -05:00
|
|
|
case OIND:
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.exprPtr(n.Left, bounded, n.Pos)
|
2015-07-13 15:55:37 -07:00
|
|
|
case ODOT:
|
2017-02-02 19:47:59 -05:00
|
|
|
p := s.addr(n.Left, bounded)
|
|
|
|
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
|
2015-07-13 15:55:37 -07:00
|
|
|
case ODOTPTR:
|
2016-12-07 17:40:46 -08:00
|
|
|
p := s.exprPtr(n.Left, bounded, n.Pos)
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
|
2015-09-11 16:40:05 -04:00
|
|
|
case OCLOSUREVAR:
|
2016-03-04 12:34:43 -08:00
|
|
|
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
|
2017-03-18 10:16:03 -07:00
|
|
|
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
|
2015-10-09 16:48:30 -04:00
|
|
|
case OCONVNOP:
|
2017-02-02 19:47:59 -05:00
|
|
|
addr := s.addr(n.Left, bounded)
|
|
|
|
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
|
2016-01-25 17:06:54 -08:00
|
|
|
case OCALLFUNC, OCALLINTER, OCALLMETH:
|
2017-02-02 19:47:59 -05:00
|
|
|
return s.call(n, callNormal)
|
2016-10-28 11:37:45 -07:00
|
|
|
case ODOTTYPE:
|
|
|
|
v, _ := s.dottype(n, false)
|
|
|
|
if v.Op != ssa.OpLoad {
|
|
|
|
s.Fatalf("dottype of non-load")
|
|
|
|
}
|
|
|
|
if v.Args[1] != s.mem() {
|
|
|
|
s.Fatalf("memory no longer live from dottype load")
|
|
|
|
}
|
2017-02-02 19:47:59 -05:00
|
|
|
return v.Args[0]
|
2015-05-18 16:44:20 -07:00
|
|
|
default:
|
2016-09-14 10:01:05 -07:00
|
|
|
s.Fatalf("unhandled addr %v", n.Op)
|
2017-02-02 19:47:59 -05:00
|
|
|
return nil
|
2015-05-18 16:44:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-10 15:03:06 -07:00
|
|
|
// canSSA reports whether n is SSA-able.
|
2016-01-11 21:05:33 -08:00
|
|
|
// n must be an ONAME (or an ODOT sequence with an ONAME base).
|
2016-02-27 17:49:31 -08:00
|
|
|
func (s *state) canSSA(n *Node) bool {
|
2016-04-29 12:09:32 -07:00
|
|
|
if Debug['N'] != 0 {
|
|
|
|
return false
|
|
|
|
}
|
2016-10-30 21:10:03 -07:00
|
|
|
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
|
2016-01-11 21:05:33 -08:00
|
|
|
n = n.Left
|
|
|
|
}
|
2015-06-10 15:03:06 -07:00
|
|
|
if n.Op != ONAME {
|
2015-06-27 15:45:20 +01:00
|
|
|
return false
|
2015-06-10 15:03:06 -07:00
|
|
|
}
|
2017-02-27 19:56:38 +02:00
|
|
|
if n.Addrtaken() {
|
2015-06-10 15:03:06 -07:00
|
|
|
return false
|
|
|
|
}
|
cmd/compile: fix liveness computation for heap-escaped parameters
The liveness computation of parameters generally was never
correct, but forcing all parameters to be live throughout the
function covered up that problem. The new SSA back end is
too clever: even though it currently keeps the parameter values live
throughout the function, it may find optimizations that mean
the current values are not written back to the original parameter
stack slots immediately or ever (for example if a parameter is set
to nil, SSA constant propagation may replace all later uses of the
parameter with a constant nil, eliminating the need to write the nil
value back to the stack slot), so the liveness code must now
track the actual operations on the stack slots, exposing these
problems.
One small problem in the handling of arguments is that nodarg
can return ONAME PPARAM nodes with adjusted offsets, so that
there are actually multiple *Node pointers for the same parameter
in the instruction stream. This might be possible to correct, but
not in this CL. For now, we fix this by using n.Orig instead of n
when considering PPARAM and PPARAMOUT nodes.
The major problem in the handling of arguments is general
confusion in the liveness code about the meaning of PPARAM|PHEAP
and PPARAMOUT|PHEAP nodes, especially as contrasted with PAUTO|PHEAP.
The difference between these two is that when a local variable "moves"
to the heap, it's really just allocated there to start with; in contrast,
when an argument moves to the heap, the actual data has to be copied
there from the stack at the beginning of the function, and when a
result "moves" to the heap the value in the heap has to be copied
back to the stack when the function returns
This general confusion is also present in the SSA back end.
The PHEAP bit worked decently when I first introduced it 7 years ago (!)
in 391425ae. The back end did nothing sophisticated, and in particular
there was no analysis at all: no escape analysis, no liveness analysis,
and certainly no SSA back end. But the complications caused in the
various downstream consumers suggest that this should be a detail
kept mainly in the front end.
This CL therefore eliminates both the PHEAP bit and even the idea of
"heap variables" from the back ends.
First, it replaces the PPARAM|PHEAP, PPARAMOUT|PHEAP, and PAUTO|PHEAP
variable classes with the single PAUTOHEAP, a pseudo-class indicating
a variable maintained on the heap and available by indirecting a
local variable kept on the stack (a plain PAUTO).
Second, walkexpr replaces all references to PAUTOHEAP variables
with indirections of the corresponding PAUTO variable.
The back ends and the liveness code now just see plain indirected
variables. This may actually produce better code, but the real goal
here is to eliminate these little-used and somewhat suspect code
paths in the back end analyses.
The OPARAM node type goes away too.
A followup CL will do the same to PPARAMREF. I'm not sure that
the back ends (SSA in particular) are handling those right either,
and with the framework established in this CL that change is trivial
and the result clearly more correct.
Fixes #15747.
Change-Id: I2770b1ce3cbc93981bfc7166be66a9da12013d74
Reviewed-on: https://go-review.googlesource.com/23393
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2016-05-25 01:33:24 -04:00
|
|
|
if n.isParamHeapCopy() {
|
2015-06-10 15:03:06 -07:00
|
|
|
return false
|
|
|
|
}
|
2016-05-25 01:33:24 -04:00
|
|
|
if n.Class == PAUTOHEAP {
|
|
|
|
Fatalf("canSSA of PAUTOHEAP %v", n)
|
|
|
|
}
|
2015-08-28 13:35:32 -07:00
|
|
|
switch n.Class {
|
2016-05-25 10:29:50 -04:00
|
|
|
case PEXTERN:
|
2015-06-10 15:03:06 -07:00
|
|
|
return false
|
2016-02-27 17:49:31 -08:00
|
|
|
case PPARAMOUT:
|
2017-03-15 22:55:21 -07:00
|
|
|
if s.hasdefer {
|
2016-02-27 17:49:31 -08:00
|
|
|
// TODO: handle this case? Named return values must be
|
|
|
|
// in memory so that the deferred function can see them.
|
|
|
|
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
|
2017-01-30 14:55:12 -08:00
|
|
|
// Or maybe not, see issue 18860. Even unnamed return values
|
|
|
|
// must be written back so if a defer recovers, the caller can see them.
|
2016-02-27 17:49:31 -08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
if s.cgoUnsafeArgs {
|
|
|
|
// Cgo effectively takes the address of all result args,
|
|
|
|
// but the compiler can't see that.
|
|
|
|
return false
|
|
|
|
}
|
2015-06-10 15:03:06 -07:00
|
|
|
}
|
2017-03-15 23:01:31 -07:00
|
|
|
if n.Class == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
|
2015-09-08 21:28:44 -07:00
|
|
|
// wrappers generated by genwrapper need to update
|
|
|
|
// the .this pointer in place.
|
2016-02-27 17:49:31 -08:00
|
|
|
// TODO: treat as a PPARAMOUT?
|
2015-09-08 21:28:44 -07:00
|
|
|
return false
|
|
|
|
}
|
2015-08-18 10:26:28 -07:00
|
|
|
return canSSAType(n.Type)
|
|
|
|
// TODO: try to make more variables SSAable?
|
|
|
|
}
|
|
|
|
|
|
|
|
// canSSAType reports whether variables of type t are SSA-able.
|
|
|
|
func canSSAType(t *Type) bool {
|
|
|
|
dowidth(t)
|
|
|
|
if t.Width > int64(4*Widthptr) {
|
2016-03-01 23:21:55 +00:00
|
|
|
// 4*Widthptr is an arbitrary constant. We want it
|
2015-08-18 10:26:28 -07:00
|
|
|
// to be at least 3*Widthptr so slices can be registerized.
|
|
|
|
// Too big and we'll introduce too much register pressure.
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
switch t.Etype {
|
|
|
|
case TARRAY:
|
2016-10-30 21:10:03 -07:00
|
|
|
// We can't do larger arrays because dynamic indexing is
|
2015-08-18 10:26:28 -07:00
|
|
|
// not supported on SSA variables.
|
2016-10-30 21:10:03 -07:00
|
|
|
// TODO: allow if all indexes are constant.
|
|
|
|
if t.NumElem() == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if t.NumElem() == 1 {
|
|
|
|
return canSSAType(t.Elem())
|
|
|
|
}
|
2015-06-27 15:45:20 +01:00
|
|
|
return false
|
2015-08-18 10:26:28 -07:00
|
|
|
case TSTRUCT:
|
2016-03-17 13:26:08 -07:00
|
|
|
if t.NumFields() > ssa.MaxStruct {
|
2015-08-18 10:26:28 -07:00
|
|
|
return false
|
|
|
|
}
|
2016-03-17 01:32:18 -07:00
|
|
|
for _, t1 := range t.Fields().Slice() {
|
2015-08-18 10:26:28 -07:00
|
|
|
if !canSSAType(t1.Type) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
2016-01-11 21:05:33 -08:00
|
|
|
return true
|
2015-08-18 10:26:28 -07:00
|
|
|
default:
|
|
|
|
return true
|
2015-06-27 15:45:20 +01:00
|
|
|
}
|
2015-06-10 15:03:06 -07:00
|
|
|
}
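// Under these rules a string or slice header (2 or 3 words) is SSA-able,
// as is a small struct whose fields are all SSA-able, while arrays with
// more than one element stay in memory so they can be indexed dynamically.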
|
|
|
|
|
2016-04-19 21:06:53 -07:00
|
|
|
// exprPtr evaluates n to a pointer and nil-checks it.
|
2016-12-15 17:17:01 -08:00
|
|
|
func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
|
2016-04-19 21:06:53 -07:00
|
|
|
p := s.expr(n)
|
2017-02-27 19:56:38 +02:00
|
|
|
if bounded || n.NonNil() {
|
2017-03-16 22:42:10 -07:00
|
|
|
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
|
|
|
|
s.f.Warnl(lineno, "removed nil check")
|
2016-04-19 21:06:53 -07:00
|
|
|
}
|
|
|
|
return p
|
|
|
|
}
|
|
|
|
s.nilCheck(p)
|
|
|
|
return p
|
|
|
|
}
|
|
|
|
|
2015-05-18 16:44:20 -07:00
|
|
|
// nilCheck generates nil pointer checking code.
|
2015-07-24 11:55:52 -07:00
|
|
|
// Used only for automatically inserted nil checks,
|
|
|
|
// not for user code like 'x != nil'.
|
2015-05-18 16:44:20 -07:00
|
|
|
func (s *state) nilCheck(ptr *ssa.Value) {
|
2016-09-16 00:33:29 +10:00
|
|
|
if disable_checknil != 0 {
|
2015-08-11 09:47:45 -07:00
|
|
|
return
|
|
|
|
}
|
2016-09-13 17:01:01 -07:00
|
|
|
s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
|
2015-05-18 16:44:20 -07:00
|
|
|
}
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
|
2015-05-18 16:44:20 -07:00
|
|
|
// Starts a new block on return.
|
2016-05-25 09:49:28 -04:00
|
|
|
// idx is already converted to full int width.
|
2015-05-18 16:44:20 -07:00
|
|
|
func (s *state) boundsCheck(idx, len *ssa.Value) {
|
2015-08-18 15:25:40 -07:00
|
|
|
if Debug['B'] != 0 {
|
|
|
|
return
|
|
|
|
}
|
2015-05-18 16:44:20 -07:00
|
|
|
|
|
|
|
// bounds check
|
2015-07-30 11:03:05 -07:00
|
|
|
cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
|
2016-09-16 00:33:29 +10:00
|
|
|
s.check(cmp, panicindex)
|
2015-08-24 23:52:03 -07:00
|
|
|
}
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
|
2015-08-24 23:52:03 -07:00
|
|
|
// Starts a new block on return.
|
2016-05-25 09:49:28 -04:00
|
|
|
// idx and len are already converted to full int width.
|
2015-08-24 23:52:03 -07:00
|
|
|
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
|
|
|
|
if Debug['B'] != 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// bounds check
|
|
|
|
cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
|
2015-09-17 16:54:15 -07:00
|
|
|
s.check(cmp, panicslice)
|
2015-08-24 23:52:03 -07:00
|
|
|
}

// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekPos()
	bPanic := s.panics[funcLine{fn, line}]
	if bPanic == nil {
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[funcLine{fn, line}] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}

func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
	needcheck := true
	switch b.Op {
	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
		if b.AuxInt != 0 {
			needcheck = false
		}
	}
	if needcheck {
		// do a size-appropriate check for zero
		cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
		s.check(cmp, panicdivide)
	}
	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
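
// Rough sketch of the guard this produces: for
//
//	q := a / b
//
// with a non-constant divisor b, the division is preceded by
//
//	cmp = (b != 0)          // size-appropriate compare against zeroVal
//	check(cmp, panicdivide)
//
// while a constant nonzero divisor skips the check entirely.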

// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
	// Write args to the stack
	off := Ctxt.FixedFrameSize()
	for _, arg := range args {
		t := arg.Type
		off = Rnd(off, t.Alignment())
		ptr := s.constOffPtrSP(t.PtrTo(), off)
		size := t.Size()
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, ptr, arg, s.mem())
		off += size
	}
	off = Rnd(off, int64(Widthptr))
	if thearch.LinkArch.Name == "amd64p32" {
		// amd64p32 wants 8-byte alignment of the start of the return values.
		off = Rnd(off, 8)
	}

	// Issue call
	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn, s.mem())
	s.vars[&memVar] = call

	if !returns {
		// Finish block
		b := s.endBlock()
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off - Ctxt.FixedFrameSize()
		if len(results) > 0 {
			Fatalf("panic call can't have results")
		}
		return nil
	}

	// Load results
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		off = Rnd(off, t.Alignment())
		ptr := s.constOffPtrSP(typPtr(t), off)
		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
		off += t.Size()
	}
	off = Rnd(off, int64(Widthptr))

	// Remember how much callee stack space we needed.
	call.AuxInt = off

	return res
}
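
// Typical uses elsewhere in this file (for illustration): a call that
// returns results,
//
//	call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
//
// and a panicking call that terminates the current block,
//
//	s.rtcall(panicdottypeE, false, nil, itab, target, taddr)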

// do *left = right for type t.
func (s *state) storeType(t *Type, left, right *ssa.Value, skip skipMask) {
	if skip == 0 && (!haspointers(t) || ssa.IsStackAddr(left)) {
		// Known to not have write barrier. Store the whole type.
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
		return
	}

	// store scalar fields first, so write barrier stores for
	// pointer fields can be grouped together, and scalar values
	// don't need to be live across the write barrier call.
	// TODO: if the writebarrier pass knows how to reorder stores,
	// we can do a single store here as long as skip==0.
	s.storeTypeScalars(t, left, right, skip)
	if skip&skipPtr == 0 && haspointers(t) {
		s.storeTypePtrs(t, left, right)
	}
}
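
// For instance (sketch), storing a slice value with skip == 0 first writes
// the len and cap words via storeTypeScalars and only then the data pointer
// via storeTypePtrs, so the stores that may need write barriers end up
// grouped together.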

// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
	case t.IsPtrShaped():
		// no scalar fields.
	case t.IsString():
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
	case t.IsSlice():
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], lenAddr, len, s.mem())
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TINT], capAddr, cap, s.mem())
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, Types[TUINTPTR], left, itab, s.mem())
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft.(*Type), addr, val, 0)
		}
	case t.IsArray() && t.NumElem() == 0:
		// nothing
	case t.IsArray() && t.NumElem() == 1:
		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
	default:
		s.Fatalf("bad write barrier type %v", t)
	}
}

// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
	case t.IsInterface():
		// itab field is treated as a scalar.
		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft.(*Type), addr, val)
		}
	case t.IsArray() && t.NumElem() == 0:
		// nothing
	case t.IsArray() && t.NumElem() == 1:
		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
	default:
		s.Fatalf("bad write barrier type %v", t)
	}
}

// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
	var elemtype *Type
	var ptrtype *Type
	var ptr *ssa.Value
	var len *ssa.Value
	var cap *ssa.Value
	zero := s.constInt(Types[TINT], 0)
	switch {
	case t.IsSlice():
		elemtype = t.Elem()
		ptrtype = typPtr(elemtype)
		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
	case t.IsString():
		elemtype = Types[TUINT8]
		ptrtype = typPtr(elemtype)
		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
		cap = len
	case t.IsPtr():
		if !t.Elem().IsArray() {
			s.Fatalf("bad ptr to array in slice %v\n", t)
		}
		elemtype = t.Elem().Elem()
		ptrtype = typPtr(elemtype)
		s.nilCheck(v)
		ptr = v
		len = s.constInt(Types[TINT], t.Elem().NumElem())
		cap = len
	default:
		s.Fatalf("bad type in slice %v\n", t)
	}

	// Set default values
	if i == nil {
		i = zero
	}
	if j == nil {
		j = len
	}
	if k == nil {
		k = cap
	}

	// Panic if slice indices are not in bounds.
	s.sliceBoundsCheck(i, j)
	if j != k {
		s.sliceBoundsCheck(j, k)
	}
	if k != cap {
		s.sliceBoundsCheck(k, cap)
	}

	// Generate the following code assuming that indexes are in bounds.
	// The masking is to make sure that we don't generate a slice
	// that points to the next object in memory.
	// rlen = j - i
	// rcap = k - i
	// delta = i * elemsize
	// rptr = p + delta&mask(rcap)
	// result = (SliceMake rptr rlen rcap)
	// where mask(x) is 0 if x==0 and -1 if x>0.
	subOp := s.ssaOp(OSUB, Types[TINT])
	mulOp := s.ssaOp(OMUL, Types[TINT])
	andOp := s.ssaOp(OAND, Types[TINT])
	rlen := s.newValue2(subOp, Types[TINT], j, i)
	var rcap *ssa.Value
	switch {
	case t.IsString():
		// Capacity of the result is unimportant. However, we use
		// rcap to test if we've generated a zero-length slice.
		// Use length of strings for that.
		rcap = rlen
	case j == k:
		rcap = rlen
	default:
		rcap = s.newValue2(subOp, Types[TINT], k, i)
	}

	var rptr *ssa.Value
	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
		// No pointer arithmetic necessary.
		rptr = ptr
	} else {
		// delta = # of bytes to offset pointer by.
		delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width))
		// If we're slicing to the point where the capacity is zero,
		// zero out the delta.
		mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap)
		delta = s.newValue2(andOp, Types[TINT], delta, mask)
		// Compute rptr = ptr + delta
		rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
	}

	return rptr, rlen, rcap
}
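
// Worked example (illustrative): for s[i:i] on a []int64, rlen = rcap = 0,
// so Slicemask(rcap) is 0 and the computed delta i*8 is masked away; the
// result keeps the original pointer rather than pointing one element past
// the data, i.e. at the next object in memory.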

type u642fcvtTab struct {
	geq, cvt2F, and, rsh, or, add ssa.Op
	one                           func(*state, ssa.Type, int64) *ssa.Value
}

var u64_f64 u642fcvtTab = u642fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to64F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd64F,
	one:   (*state).constInt64,
}

var u64_f32 u642fcvtTab = u642fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to32F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd32F,
	one:   (*state).constInt64,
}

func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}

func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}

func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x >= 0 {
	//    result = (floatY) x
	// } else {
	// 	  y = uintX(x) ; y = x & 1
	// 	  z = uintX(x) ; z = z >> 1
	// 	  z = z >> 1
	// 	  z = z | y
	// 	  result = floatY(z)
	// 	  result = result + result
	// }
	//
	// Code borrowed from old code generator.
	// What's going on: large 64-bit "unsigned" looks like
	// negative number to hardware's integer-to-float
	// conversion. However, because the mantissa is only
	// 63 bits, we don't need the LSB, so instead we do an
	// unsigned right shift (divide by two), convert, and
	// double. However, before we do that, we need to be
	// sure that we do not lose a "1" if that made the
	// difference in the resulting rounding. Therefore, we
	// preserve it, and OR (not ADD) it back in. The case
	// that matters is when the eleven discarded bits are
	// equal to 10000000001; that rounds up, and the 1 cannot
	// be lost else it would round down if the LSB of the
	// candidate mantissa is 0.
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
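
// Numeric sanity check of the else branch (illustrative): for x = 1<<63,
// which is negative when read as a signed 64-bit value, y = x&1 = 0,
// z = (x>>1)|y = 1<<62, and float(z)+float(z) = 2^63, the exact answer.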

type u322fcvtTab struct {
	cvtI2F, cvtF2F ssa.Op
}

var u32_f64 u322fcvtTab = u322fcvtTab{
	cvtI2F: ssa.OpCvt32to64F,
	cvtF2F: ssa.OpCopy,
}

var u32_f32 u322fcvtTab = u322fcvtTab{
	cvtI2F: ssa.OpCvt32to32F,
	cvtF2F: ssa.OpCvt64Fto32F,
}

func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}

func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}

func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x >= 0 {
	// 	result = floatY(x)
	// } else {
	// 	result = floatY(float64(x) + (1<<32))
	// }
	cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x)
	twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32))
	a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32)
	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)

	s.vars[n] = a3
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
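
// Illustrative check of the else branch: for x = 1<<31 (negative as int32),
// float64(x) is -2^31 and adding 1<<32 yields 2^31, the unsigned value.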

// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	}
	// if n == nil {
	//   return 0
	// } else {
	//   // len
	//   return *((*int)n)
	//   // cap
	//   return *(((*int)n)+1)
	// }
	lenType := n.Type
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchUnlikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// length/capacity of a nil map/chan is zero
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	s.vars[n] = s.zeroVal(lenType)
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	if n.Op == OLEN {
		// length is stored in the first word for map/chan
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	} else {
		s.Fatalf("op must be OLEN or OCAP")
	}
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, lenType)
}

type f2uCvtTab struct {
	ltf, cvt2U, subf, or ssa.Op
	floatValue           func(*state, ssa.Type, float64) *ssa.Value
	intValue             func(*state, ssa.Type, int64) *ssa.Value
	cutoff               uint64
}

var f32_u64 f2uCvtTab = f2uCvtTab{
	ltf:        ssa.OpLess32F,
	cvt2U:      ssa.OpCvt32Fto64,
	subf:       ssa.OpSub32F,
	or:         ssa.OpOr64,
	floatValue: (*state).constFloat32,
	intValue:   (*state).constInt64,
	cutoff:     9223372036854775808,
}

var f64_u64 f2uCvtTab = f2uCvtTab{
	ltf:        ssa.OpLess64F,
	cvt2U:      ssa.OpCvt64Fto64,
	subf:       ssa.OpSub64F,
	or:         ssa.OpOr64,
	floatValue: (*state).constFloat64,
	intValue:   (*state).constInt64,
	cutoff:     9223372036854775808,
}

var f32_u32 f2uCvtTab = f2uCvtTab{
	ltf:        ssa.OpLess32F,
	cvt2U:      ssa.OpCvt32Fto32,
	subf:       ssa.OpSub32F,
	or:         ssa.OpOr32,
	floatValue: (*state).constFloat32,
	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
	cutoff:     2147483648,
}

var f64_u32 f2uCvtTab = f2uCvtTab{
	ltf:        ssa.OpLess64F,
	cvt2U:      ssa.OpCvt64Fto32,
	subf:       ssa.OpSub64F,
	or:         ssa.OpOr32,
	floatValue: (*state).constFloat64,
	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
	cutoff:     2147483648,
}

func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
}

func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u32, n, x, ft, tt)
}

func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u32, n, x, ft, tt)
}

func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// cutoff:=1<<(intY_Size-1)
	// if x < floatX(cutoff) {
	// 	result = uintY(x)
	// } else {
	// 	y = x - floatX(cutoff)
	// 	z = uintY(y)
	// 	result = z | -(cutoff)
	// }
	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	y := s.newValue2(cvttab.subf, ft, x, cutoff)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
	a1 := s.newValue2(cvttab.or, tt, y, z)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
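
// Illustrative check for float64 -> uint64: cutoff is 2^63. For x = 2^63
// the else branch runs: y = x-cutoff = 0, z = uint64(0), and OR-ing in
// -cutoff (2^63 as an unsigned bit pattern) gives exactly 2^63.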

// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)             // input interface
	lineno = n.Pos                      // for typename call
	target := s.expr(typename(n.Type))  // target type
	byteptr := s.f.Config.Types.BytePtr

	if n.Type.IsInterface() {
		if n.Type.IsEmptyInterface() {
			// Converting to an empty interface.
			// Input could be an empty or nonempty interface.
			if Debug_typeassert > 0 {
				Warnl(n.Pos, "type assertion inlined")
			}

			// Get itab/type field from input.
			itab := s.newValue1(ssa.OpITab, byteptr, iface)
			// Conversion succeeds iff that field is not nil.
			cond := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], itab, s.constNil(byteptr))

			if n.Left.Type.IsEmptyInterface() && commaok {
				// Converting empty interface to empty interface with ,ok is just a nil check.
				return iface, cond
			}

			// Branch on nilness.
			b := s.endBlock()
			b.Kind = ssa.BlockIf
			b.SetControl(cond)
			b.Likely = ssa.BranchLikely
			bOk := s.f.NewBlock(ssa.BlockPlain)
			bFail := s.f.NewBlock(ssa.BlockPlain)
			b.AddEdgeTo(bOk)
			b.AddEdgeTo(bFail)

			if !commaok {
				// On failure, panic by calling panicnildottype.
				s.startBlock(bFail)
				s.rtcall(panicnildottype, false, nil, target)

				// On success, return (perhaps modified) input interface.
				s.startBlock(bOk)
				if n.Left.Type.IsEmptyInterface() {
					res = iface // Use input interface unchanged.
					return
				}
				// Load type out of itab, build interface with existing idata.
				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
				typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
				idata := s.newValue1(ssa.OpIData, n.Type, iface)
				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
				return
			}

			s.startBlock(bOk)
			// nonempty -> empty
			// Need to load type from itab
			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
			s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
			s.endBlock()

			// itab is nil, might as well use that as the nil result.
			s.startBlock(bFail)
			s.vars[&typVar] = itab
			s.endBlock()

			// Merge point.
			bEnd := s.f.NewBlock(ssa.BlockPlain)
			bOk.AddEdgeTo(bEnd)
			bFail.AddEdgeTo(bEnd)
			s.startBlock(bEnd)
			idata := s.newValue1(ssa.OpIData, n.Type, iface)
			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
			resok = cond
			delete(s.vars, &typVar)
			return
		}
		// converting to a nonempty interface needs a runtime call.
		if Debug_typeassert > 0 {
			Warnl(n.Pos, "type assertion not inlined")
		}
		if n.Left.Type.IsEmptyInterface() {
			if commaok {
				call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
				return call[0], call[1]
			}
			return s.rtcall(assertE2I, true, []*Type{n.Type}, target, iface)[0], nil
		}
		if commaok {
			call := s.rtcall(assertI2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
			return call[0], call[1]
		}
		return s.rtcall(assertI2I, true, []*Type{n.Type}, target, iface)[0], nil
	}

	if Debug_typeassert > 0 {
		Warnl(n.Pos, "type assertion inlined")
	}

	// Converting to a concrete type.
	direct := isdirectiface(n.Type)
	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
	if Debug_typeassert > 0 {
		Warnl(n.Pos, "type assertion inlined")
	}
	var targetITab *ssa.Value
	if n.Left.Type.IsEmptyInterface() {
		// Looking for pointer to target type.
		targetITab = target
	} else {
		// Looking for pointer to itab for target type and source interface.
		targetITab = s.expr(itabname(n.Type, n.Left.Type))
	}

	var tmp *Node       // temporary for use with large types
	var addr *ssa.Value // address of tmp
	if commaok && !canSSAType(n.Type) {
		// unSSAable type, use temporary.
		// TODO: get rid of some of these temporaries.
		tmp = temp(n.Type)
		addr = s.addr(tmp, false)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
	}

	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], itab, targetITab)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.Likely = ssa.BranchLikely

	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bOk)
	b.AddEdgeTo(bFail)

	if !commaok {
		// on failure, panic by calling panicdottype
		s.startBlock(bFail)
		sym := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: byteptr, Sym: Linksym(typenamesym(n.Left.Type))})
		taddr := s.newValue1A(ssa.OpAddr, byteptr, sym, s.sb)
		if n.Left.Type.IsEmptyInterface() {
			s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
		} else {
			s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
		}

		// on success, return data from interface
		s.startBlock(bOk)
		if direct {
			return s.newValue1(ssa.OpIData, n.Type, iface), nil
		}
		p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
	}

	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	// Note that we need a new valVar each time (unlike okVar where we can
	// reuse the variable) because it might have a different type every time.
	valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "val"}}

	// type assertion succeeded
	s.startBlock(bOk)
	if tmp == nil {
		if direct {
			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
		} else {
			p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
			s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
		}
	} else {
		p := s.newValue1(ssa.OpIData, typPtr(n.Type), iface)
		store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem())
		store.Aux = n.Type
		s.vars[&memVar] = store
	}
	s.vars[&okVar] = s.constBool(true)
	s.endBlock()
	bOk.AddEdgeTo(bEnd)

	// type assertion failed
	s.startBlock(bFail)
	if tmp == nil {
		s.vars[valVar] = s.zeroVal(n.Type)
	} else {
		store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.Size(), addr, s.mem())
		store.Aux = n.Type
		s.vars[&memVar] = store
	}
	s.vars[&okVar] = s.constBool(false)
	s.endBlock()
	bFail.AddEdgeTo(bEnd)

	// merge point
	s.startBlock(bEnd)
	if tmp == nil {
		res = s.variable(valVar, n.Type)
		delete(s.vars, valVar)
	} else {
		res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
	}
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &okVar)
	return res, resok
}
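
// In source terms (rough sketch), this handles both forms of assertion:
//
//	v := x.(T)     // commaok == false: failure panics via panicnildottype/panicdottype*
//	v, ok := x.(T) // commaok == true: failure yields the zero value and ok == false
//
// with assertions to nonempty interface types dispatched to the
// assertE2I*/assertI2I* runtime calls above when they are not inlined.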

// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	v := s.vars[name]
	if v != nil {
		return v
	}
	v = s.fwdVars[name]
	if v != nil {
		return v
	}

	if s.curBlock == s.f.Entry {
		// No variable should be live at entry.
		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
	}
	// Make a FwdRef, which records a value that's live on block input.
	// We'll find the matching definition as part of insertPhis.
	v = s.newValue0A(ssa.OpFwdRef, t, name)
	s.fwdVars[name] = v
	s.addNamedValue(name, v)
	return v
}

func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
}

func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	if n.Class == Pxxx {
		// Don't track our dummy nodes (&memVar etc.).
		return
	}
	if n.IsAutoTmp() {
		// Don't track temporary variables.
		return
	}
	if n.Class == PPARAMOUT {
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
		return
	}
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
	}
	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	values, ok := s.f.NamedValues[loc]
	if !ok {
		s.f.Names = append(s.f.Names, loc)
	}
	s.f.NamedValues[loc] = append(values, v)
}

// Branch is an unresolved branch.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
}

// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.
	Branches []Branch

	// bstart remembers where each block starts (indexed by block ID)
	bstart []*obj.Prog

	// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
	SSEto387 map[int16]int16
	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
	ScratchFpMem *Node

	maxarg int64 // largest frame size for arguments to calls made by the function

	// Map from GC safe points to stack map index, generated by
	// liveness analysis.
	stackMapIndex map[*ssa.Value]int
}

// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
	return Prog(as)
}

// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
	return pc
}

// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
	lineno = pos
}

// genssa appends entries to ptxt for each instruction in f.
func genssa(f *ssa.Func, ptxt *obj.Prog) {
	var s SSAGenState

	e := f.Frontend().(*ssafn)

	// Generate GC bitmaps.
	gcargs := makefuncdatasym("gcargs·", obj.FUNCDATA_ArgsPointerMaps)
	gclocals := makefuncdatasym("gclocals·", obj.FUNCDATA_LocalsPointerMaps)
	s.stackMapIndex = liveness(e, f, gcargs, gclocals)

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())

	var valueProgs map[*obj.Prog]*ssa.Value
	var blockProgs map[*obj.Prog]*ssa.Block
	var logProgs = e.log
	if logProgs {
		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		blockProgs[pc] = f.Blocks[0]
	}

	if thearch.Use387 {
		s.SSEto387 = map[int16]int16{}
	}

	s.ScratchFpMem = scratchFpMem
	scratchFpMem = nil

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = pc
		// Emit values in block
		thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := pc
			s.SetPos(v.Pos)

			switch v.Op {
			case ssa.OpInitMem:
				// memory arg needs no code
			case ssa.OpArg:
				// input args need no code
			case ssa.OpSP, ssa.OpSB:
				// nothing to do
			case ssa.OpSelect0, ssa.OpSelect1:
				// nothing to do
			case ssa.OpGetG:
				// nothing to do when there's a g register,
				// and checkLower complains if there's not
			case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
				// nothing to do; already used by liveness
			case ssa.OpPhi:
				CheckLoweredPhi(v)

			default:
				// let the backend handle it
				thearch.SSAGenValue(&s, v)
			}

			if logProgs {
				for ; x != pc; x = x.Link {
					valueProgs[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
			next = f.Blocks[i+1]
		}
		x := pc
		s.SetPos(b.Pos)
		thearch.SSAGenBlock(&s, b, next)
		if logProgs {
			for ; x != pc; x = x.Link {
				blockProgs[x] = b
			}
		}
	}

	// Resolve branches
	for _, br := range s.Branches {
		br.P.To.Val = s.bstart[br.B.ID]
	}

	if logProgs {
		for p := ptxt; p != nil; p = p.Link {
			var s string
			if v, ok := valueProgs[p]; ok {
				s = v.String()
			} else if b, ok := blockProgs[p]; ok {
				s = b.String()
			} else {
				s = "   " // most value and branch strings are 2-3 characters long
			}
			f.Logf("%s\t%s\n", s, p)
		}
		if f.HTMLWriter != nil {
			// LineHist is defunct now - this code won't do
			// anything.
			// TODO: fix this (ideally without a global variable)
			// saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
			// ptxt.Ctxt.LineHist.PrintFilenameOnly = true
			var buf bytes.Buffer
			buf.WriteString("<code>")
			buf.WriteString("<dl class=\"ssa-gen\">")
			for p := ptxt; p != nil; p = p.Link {
				buf.WriteString("<dt class=\"ssa-prog-src\">")
				if v, ok := valueProgs[p]; ok {
					buf.WriteString(v.HTML())
				} else if b, ok := blockProgs[p]; ok {
					buf.WriteString(b.HTML())
				}
				buf.WriteString("</dt>")
				buf.WriteString("<dd class=\"ssa-prog\">")
				buf.WriteString(html.EscapeString(p.String()))
				buf.WriteString("</dd>")
				buf.WriteString("</li>")
			}
			buf.WriteString("</dl>")
			buf.WriteString("</code>")
			f.HTMLWriter.WriteColumn("genssa", buf.String())
			// ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
		}
	}

	// Add frame prologue. Zero ambiguously live variables.
	thearch.Defframe(ptxt, e.curfn, e.stksize+s.maxarg)
	if Debug['f'] != 0 {
		frame(0)
	}

	f.HTMLWriter.Close()
	f.HTMLWriter = nil
}

type FloatingEQNEJump struct {
	Jump  obj.As
	Index int
}

func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction) {
	p := s.Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	to := jumps.Index
	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
	if to == 1 {
		likely = -likely
	}
	// liblink reorders the instruction stream as it sees fit.
	// Pass along what we know so liblink can make use of it.
	// TODO: Once we've fully switched to SSA,
	// make liblink leave our output alone.
	switch likely {
	case ssa.BranchUnlikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
	case ssa.BranchLikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
	}
}

func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	likely := b.Likely
	switch next {
	case b.Succs[0].Block():
		s.oneFPJump(b, &jumps[0][0], likely)
		s.oneFPJump(b, &jumps[0][1], likely)
	case b.Succs[1].Block():
		s.oneFPJump(b, &jumps[1][0], likely)
		s.oneFPJump(b, &jumps[1][1], likely)
	default:
		s.oneFPJump(b, &jumps[1][0], likely)
		s.oneFPJump(b, &jumps[1][1], likely)
		q := s.Prog(obj.AJMP)
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
	}
}
|
|
|
|
|
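Illustrative sketch (not part of this file): an architecture backend fills in a pair of these tables and hands one to FPJump from its block-lowering switch. The opcode constants, package qualifiers, and table contents below are assumptions modeled on amd64-style floating-point equality, where "not equal" and "unordered" (parity set) both mean false:

var eqfJumps = [2][2]gc.FloatingEQNEJump{
	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]: jump to false on NE or unordered
	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]: jump to true only when equal and ordered
}

// ...and then, in the backend's block lowering (sketch):
//	case ssa.BlockAMD64EQF:
//		s.FPJump(b, next, &eqfJumps)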
2016-06-24 14:37:17 -05:00
|
|
|
func AuxOffset(v *ssa.Value) (offset int64) {
|
|
|
|
if v.Aux == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
switch sym := v.Aux.(type) {
|
|
|
|
|
|
|
|
case *ssa.AutoSymbol:
|
|
|
|
n := sym.Node.(*Node)
|
|
|
|
return n.Xoffset
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2016-03-12 14:07:40 -08:00
|
|
|
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
|
|
|
|
func AddAux(a *obj.Addr, v *ssa.Value) {
|
|
|
|
AddAux2(a, v, v.AuxInt)
|
2015-10-21 13:13:56 -07:00
|
|
|
}
|
2016-03-12 14:07:40 -08:00
|
|
|
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
|
2016-05-06 10:11:41 -07:00
|
|
|
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
|
2016-04-29 09:02:27 -07:00
|
|
|
v.Fatalf("bad AddAux addr %v", a)
|
2015-06-19 21:02:28 -07:00
|
|
|
}
|
|
|
|
// add integer offset
|
2015-10-21 13:13:56 -07:00
|
|
|
a.Offset += offset
|
2015-06-19 21:02:28 -07:00
|
|
|
|
|
|
|
// If no additional symbol offset, we're done.
|
|
|
|
if v.Aux == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Add symbol's offset from its base register.
|
|
|
|
switch sym := v.Aux.(type) {
|
|
|
|
case *ssa.ExternSymbol:
|
|
|
|
a.Name = obj.NAME_EXTERN
|
2017-02-06 18:18:49 -08:00
|
|
|
a.Sym = sym.Sym
|
2015-06-19 21:02:28 -07:00
|
|
|
case *ssa.ArgSymbol:
|
2015-08-24 02:16:19 -07:00
|
|
|
n := sym.Node.(*Node)
|
|
|
|
a.Name = obj.NAME_PARAM
|
|
|
|
a.Sym = Linksym(n.Orig.Sym)
|
2016-09-29 19:09:36 -07:00
|
|
|
a.Offset += n.Xoffset
|
2015-06-19 21:02:28 -07:00
|
|
|
case *ssa.AutoSymbol:
|
2015-08-24 02:16:19 -07:00
|
|
|
n := sym.Node.(*Node)
|
|
|
|
a.Name = obj.NAME_AUTO
|
|
|
|
a.Sym = Linksym(n.Sym)
|
2016-10-03 12:26:25 -07:00
|
|
|
a.Offset += n.Xoffset
|
2015-06-19 21:02:28 -07:00
|
|
|
default:
|
|
|
|
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
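Illustrative sketch (not from this file): a typical caller in an architecture's ssaGenValue builds a memory operand from the value's address argument and lets AddAux fold in the AuxInt offset and any symbol. The MOVQ opcode and package qualifiers are assumptions for the example:

p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v) // folds v.AuxInt and, if set, the extern/param/auto symbol into the operand
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()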
2015-11-02 21:28:13 -08:00
|
|
|
// extendIndex extends v to a full int width.
|
2016-05-25 09:49:28 -04:00
|
|
|
// panic using the given function if v does not fit in an int (only on 32-bit archs).
|
2017-02-06 13:40:19 -08:00
|
|
|
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
For integer types less than a machine register, we have to decide
what the invariants are for the high bits of the register. We used
to set the high bits to the correct extension (sign or zero, as
determined by the type) of the low bits.
This CL makes the compiler ignore the high bits of the register
altogether (they are junk).
On the plus side, this means ops that generate subword results don't
have to worry about correctly extending them. On the minus side,
ops that consume subword arguments have to deal with the input
registers not being correctly extended.
For x86, this tradeoff is probably worth it. Almost all opcodes
have versions that use only the correct subword piece of their
inputs. (The one big exception is array indexing.) Not many opcodes
can correctly sign extend on output.
For other architectures, the tradeoff is probably not so clear, as
they don't have many subword-safe opcodes (e.g. 16-bit compare,
ignoring the high 16/48 bits). Fortunately we can decide whether
we do this per-architecture.
For the machine-independent opcodes, we pretend that the "register"
size is equal to the type width, so sign extension is immaterial.
Opcodes that care about the signedness of the input (e.g. compare,
right shift) have two different variants.
Change-Id: I465484c5734545ee697afe83bc8bf4b53bd9df8d
Reviewed-on: https://go-review.googlesource.com/12600
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2015-07-23 14:35:02 -07:00
|
|
|
size := v.Type.Size()
|
2015-11-02 21:28:13 -08:00
|
|
|
if size == s.config.IntSize {
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
2015-07-23 14:35:02 -07:00
|
|
|
return v
|
|
|
|
}
|
2015-11-02 21:28:13 -08:00
|
|
|
if size > s.config.IntSize {
|
2016-05-25 09:49:28 -04:00
|
|
|
// truncate 64-bit indexes on 32-bit pointer archs. Test the
|
|
|
|
// high word and branch to out-of-bounds failure if it is not 0.
|
|
|
|
if Debug['B'] == 0 {
|
|
|
|
hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
|
|
|
|
cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
|
|
|
|
s.check(cmp, panicfn)
|
|
|
|
}
|
|
|
|
return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
2015-07-23 14:35:02 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Extend value to the required size
|
|
|
|
var op ssa.Op
|
|
|
|
if v.Type.IsSigned() {
|
2015-11-02 21:28:13 -08:00
|
|
|
switch 10*size + s.config.IntSize {
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
2015-07-23 14:35:02 -07:00
|
|
|
case 14:
|
|
|
|
op = ssa.OpSignExt8to32
|
|
|
|
case 18:
|
|
|
|
op = ssa.OpSignExt8to64
|
|
|
|
case 24:
|
|
|
|
op = ssa.OpSignExt16to32
|
|
|
|
case 28:
|
|
|
|
op = ssa.OpSignExt16to64
|
|
|
|
case 48:
|
|
|
|
op = ssa.OpSignExt32to64
|
|
|
|
default:
|
|
|
|
s.Fatalf("bad signed index extension %s", v.Type)
|
|
|
|
}
|
|
|
|
} else {
|
2015-11-02 21:28:13 -08:00
|
|
|
switch 10*size + s.config.IntSize {
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
2015-07-23 14:35:02 -07:00
|
|
|
case 14:
|
|
|
|
op = ssa.OpZeroExt8to32
|
|
|
|
case 18:
|
|
|
|
op = ssa.OpZeroExt8to64
|
|
|
|
case 24:
|
|
|
|
op = ssa.OpZeroExt16to32
|
|
|
|
case 28:
|
|
|
|
op = ssa.OpZeroExt16to64
|
|
|
|
case 48:
|
|
|
|
op = ssa.OpZeroExt32to64
|
|
|
|
default:
|
|
|
|
s.Fatalf("bad unsigned index extension %s", v.Type)
|
|
|
|
}
|
|
|
|
}
|
2015-11-02 21:28:13 -08:00
|
|
|
return s.newValue1(op, Types[TINT], v)
|
[dev.ssa] cmd/compile/internal/ssa: redo how sign extension is handled
2015-07-23 14:35:02 -07:00
|
|
|
}
|
|
|
|
|
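A small self-contained illustration (not compiler code) of the 10*size + IntSize encoding used in the switches above: the tens digit is the index's size in bytes and the ones digit is the target's int size, so an int8 index on a 32-bit target is case 14 and an int32 index on a 64-bit target is case 48.

package main

import "fmt"

func main() {
	intSize := int64(8) // bytes in the target's int; 4 on 32-bit architectures
	for _, size := range []int64{1, 2, 4} {
		key := 10*size + intSize // same encoding as extendIndex's switch
		fmt.Printf("index size %d, int size %d -> case %d\n", size, intSize, key)
	}
	// With intSize == 8 this prints cases 18, 28 and 48,
	// i.e. extend 8-, 16- and 32-bit indexes to 64 bits.
}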
2016-06-15 15:26:47 -07:00
|
|
|
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
|
|
|
|
// Called during ssaGenValue.
|
|
|
|
func CheckLoweredPhi(v *ssa.Value) {
|
|
|
|
if v.Op != ssa.OpPhi {
|
|
|
|
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
|
|
|
|
}
|
|
|
|
if v.Type.IsMemory() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
f := v.Block.Func
|
|
|
|
loc := f.RegAlloc[v.ID]
|
|
|
|
for _, a := range v.Args {
|
|
|
|
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
|
|
|
|
v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-03 13:40:03 -07:00
|
|
|
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
|
|
|
|
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
|
|
|
|
// That register contains the closure pointer on closure entry.
|
|
|
|
func CheckLoweredGetClosurePtr(v *ssa.Value) {
|
|
|
|
entry := v.Block.Func.Entry
|
|
|
|
if entry != v.Block || entry.Values[0] != v {
|
2016-07-21 10:37:59 -07:00
|
|
|
Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
|
2016-07-03 13:40:03 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
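For instance (an architecture detail stated as background, not taken from this file): on amd64 the closure pointer arrives in DX, the closure context register, so LoweredGetClosurePtr there is declared to write DX and must stay the first value in the entry block so that nothing clobbers DX before it is read.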
2016-03-12 14:07:40 -08:00
|
|
|
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
|
2015-11-02 08:10:26 -08:00
|
|
|
// where v should be spilled.
|
2016-03-12 14:07:40 -08:00
|
|
|
func AutoVar(v *ssa.Value) (*Node, int64) {
|
2015-11-02 08:10:26 -08:00
|
|
|
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
|
2016-01-04 13:34:54 -08:00
|
|
|
if v.Type.Size() > loc.Type.Size() {
|
|
|
|
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
|
|
|
|
}
|
2015-11-02 08:10:26 -08:00
|
|
|
return loc.N.(*Node), loc.Off
|
2015-05-12 11:06:44 -07:00
|
|
|
}
|
2015-05-27 14:52:22 -07:00
|
|
|
|
2016-10-03 12:26:25 -07:00
|
|
|
func AddrAuto(a *obj.Addr, v *ssa.Value) {
|
|
|
|
n, off := AutoVar(v)
|
|
|
|
a.Type = obj.TYPE_MEM
|
|
|
|
a.Sym = Linksym(n.Sym)
|
2017-03-17 13:35:36 -07:00
|
|
|
a.Reg = int16(thearch.REGSP)
|
2016-10-03 12:26:25 -07:00
|
|
|
a.Offset = n.Xoffset + off
|
|
|
|
if n.Class == PPARAM || n.Class == PPARAMOUT {
|
|
|
|
a.Name = obj.NAME_PARAM
|
|
|
|
} else {
|
|
|
|
a.Name = obj.NAME_AUTO
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SSAGenState) AddrScratch(a *obj.Addr) {
|
2016-10-04 13:00:21 -07:00
|
|
|
if s.ScratchFpMem == nil {
|
|
|
|
panic("no scratch memory available; forgot to declare usesScratch for Op?")
|
|
|
|
}
|
2016-10-03 12:26:25 -07:00
|
|
|
a.Type = obj.TYPE_MEM
|
|
|
|
a.Name = obj.NAME_AUTO
|
|
|
|
a.Sym = Linksym(s.ScratchFpMem.Sym)
|
2017-03-17 13:35:36 -07:00
|
|
|
a.Reg = int16(thearch.REGSP)
|
2016-10-03 12:26:25 -07:00
|
|
|
a.Offset = s.ScratchFpMem.Xoffset
|
|
|
|
}
|
|
|
|
|
2017-03-10 18:34:41 -08:00
|
|
|
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
|
2017-03-09 18:32:17 -08:00
|
|
|
idx, ok := s.stackMapIndex[v]
|
|
|
|
if !ok {
|
|
|
|
Fatalf("missing stack map index for %v", v.LongString())
|
|
|
|
}
|
2017-03-20 08:01:28 -07:00
|
|
|
p := s.Prog(obj.APCDATA)
|
2017-03-09 18:32:17 -08:00
|
|
|
Addrconst(&p.From, obj.PCDATA_StackMapIndex)
|
|
|
|
Addrconst(&p.To, int64(idx))
|
|
|
|
|
2017-03-10 18:34:41 -08:00
|
|
|
if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
|
|
|
|
// Deferred calls will appear to be returning to
|
|
|
|
// the CALL deferreturn(SB) that we are about to emit.
|
|
|
|
// However, the stack trace code will show the line
|
|
|
|
// of the instruction byte before the return PC.
|
|
|
|
// To avoid that being an unrelated instruction,
|
|
|
|
// insert an actual hardware NOP that will have the right line number.
|
|
|
|
// This is different from obj.ANOP, which is a virtual no-op
|
|
|
|
// that doesn't make it into the instruction stream.
|
2017-03-17 13:35:36 -07:00
|
|
|
thearch.Ginsnop()
|
2017-03-10 18:34:41 -08:00
|
|
|
}
|
|
|
|
|
2017-03-20 08:01:28 -07:00
|
|
|
p = s.Prog(obj.ACALL)
|
2017-03-10 18:34:41 -08:00
|
|
|
if sym, ok := v.Aux.(*obj.LSym); ok {
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
p.To.Name = obj.NAME_EXTERN
|
|
|
|
p.To.Sym = sym
|
|
|
|
} else {
|
|
|
|
// TODO(mdempsky): Can these differences be eliminated?
|
2017-03-17 13:35:36 -07:00
|
|
|
switch thearch.LinkArch.Family {
|
2017-03-10 18:34:41 -08:00
|
|
|
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
|
|
|
|
p.To.Type = obj.TYPE_REG
|
|
|
|
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
|
|
|
|
p.To.Type = obj.TYPE_MEM
|
|
|
|
default:
|
|
|
|
Fatalf("unknown indirect call family")
|
|
|
|
}
|
|
|
|
p.To.Reg = v.Args[0].Reg()
|
|
|
|
}
|
2017-03-17 08:58:36 -07:00
|
|
|
if s.maxarg < v.AuxInt {
|
|
|
|
s.maxarg = v.AuxInt
|
2017-03-10 18:34:41 -08:00
|
|
|
}
|
|
|
|
return p
|
|
|
|
}
|
|
|
|
|
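Spelled out as an illustration of what the code above emits (operand syntax is schematic, not any particular assembler's):

//	PCDATA	$PCDATA_StackMapIndex, $idx
//	NOP	                 // hardware nop, only before CALL runtime.deferreturn(SB)
//	CALL	sym(SB)          // or an indirect CALL through a register/memory operand, per architecture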
2016-01-11 21:05:33 -08:00
|
|
|
// fieldIdx finds the index of the field referred to by the ODOT node n.
|
2016-03-14 12:45:18 -07:00
|
|
|
func fieldIdx(n *Node) int {
|
2016-01-11 21:05:33 -08:00
|
|
|
t := n.Left.Type
|
cmd/compile: change ODOT and friends to use Sym, not Right
The Node type ODOT and its variants all represent a selector, with a
simple name to the right of the dot. Before this change this was
represented by using an ONAME Node in the Right field. This ONAME node
served no useful purpose. This CL changes these Node types to store the
symbol in the Sym field instead, thus not requiring allocating a Node
for each selector.
When compiling x/tools/go/types this CL eliminates nearly 5000 calls to
newname and reduces the total number of Nodes allocated by about 6.6%.
It seems to cut compilation time by 1 to 2 percent.
Getting this right was somewhat subtle, and I added two dubious changes
to produce the exact same output as before. One is to ishairy in
inl.go: the ONAME node increased the cost of ODOT and friends by 1, and
I retained that, although really ODOT is not more expensive than any
other node. The other is to varexpr in walk.go: because the ONAME in
the Right field of an ODOT has no class, varexpr would always return
false for an ODOT, although in fact for some ODOT's it seemingly ought
to return true; I added an && false for now. I will send separate CLs,
that will break toolstash -cmp, to clean these up.
This CL passes toolstash -cmp.
Change-Id: I4af8a10cc59078c436130ce472f25abc3a9b2f80
Reviewed-on: https://go-review.googlesource.com/20890
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2016-03-18 16:52:30 -07:00
|
|
|
f := n.Sym
|
2016-03-30 14:56:08 -07:00
|
|
|
if !t.IsStruct() {
|
2016-01-11 21:05:33 -08:00
|
|
|
panic("ODOT's LHS is not a struct")
|
|
|
|
}
|
|
|
|
|
2016-03-14 12:45:18 -07:00
|
|
|
var i int
|
2016-03-17 01:32:18 -07:00
|
|
|
for _, t1 := range t.Fields().Slice() {
|
cmd/compile: change ODOT and friends to use Sym, not Right
2016-03-18 16:52:30 -07:00
|
|
|
if t1.Sym != f {
|
2016-01-11 21:05:33 -08:00
|
|
|
i++
|
|
|
|
continue
|
|
|
|
}
|
2016-03-28 09:40:53 -07:00
|
|
|
if t1.Offset != n.Xoffset {
|
2016-01-11 21:05:33 -08:00
|
|
|
panic("field offset doesn't match")
|
|
|
|
}
|
|
|
|
return i
|
|
|
|
}
|
2016-09-09 21:08:46 -07:00
|
|
|
panic(fmt.Sprintf("can't find field in expr %v\n", n))
|
2016-01-11 21:05:33 -08:00
|
|
|
|
2016-04-03 12:43:27 +01:00
|
|
|
// TODO: keep the result of this function somewhere in the ODOT Node
|
2016-01-11 21:05:33 -08:00
|
|
|
// so we don't have to recompute it each time we need it.
|
|
|
|
}
|
|
|
|
|
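A worked example (illustrative): given

//	type T struct{ a, b, c int }
//	var t T
//	_ = t.b // ODOT: Left has type T, Sym is "b", Xoffset is 8 on a 64-bit target

fieldIdx skips a, checks that b's offset matches the node's Xoffset, and returns 1.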
2017-03-17 07:49:22 -07:00
|
|
|
// ssafn holds frontend information about a function that the backend is processing.
|
|
|
|
// It also exports a bunch of compiler services for the ssa backend.
|
|
|
|
type ssafn struct {
|
2017-03-17 09:19:56 -07:00
|
|
|
curfn *Node
|
2016-08-30 19:11:19 -07:00
|
|
|
strings map[string]interface{} // map from constant string to data symbols
|
|
|
|
stksize int64 // stack size for current frame
|
|
|
|
stkptrsize int64 // prefix of stack containing pointers
|
2017-03-17 09:19:56 -07:00
|
|
|
log bool
|
2017-03-17 07:49:22 -07:00
|
|
|
}
|
|
|
|
|
2015-07-24 11:28:12 -07:00
|
|
|
// StringData returns a symbol (a *Sym wrapped in an interface) which
|
|
|
|
// is the data component of a global string constant containing s.
|
2016-08-30 19:11:19 -07:00
|
|
|
func (e *ssafn) StringData(s string) interface{} {
|
|
|
|
if aux, ok := e.strings[s]; ok {
|
|
|
|
return aux
|
|
|
|
}
|
|
|
|
if e.strings == nil {
|
|
|
|
e.strings = make(map[string]interface{})
|
|
|
|
}
|
2016-10-13 22:31:46 +03:00
|
|
|
data := stringsym(s)
|
2016-08-30 19:11:19 -07:00
|
|
|
aux := &ssa.ExternSymbol{Typ: idealstring, Sym: data}
|
|
|
|
e.strings[s] = aux
|
|
|
|
return aux
|
2015-05-27 14:52:22 -07:00
|
|
|
}
|
2015-06-12 11:01:13 -07:00
|
|
|
|
2017-03-17 07:49:22 -07:00
|
|
|
func (e *ssafn) Auto(t ssa.Type) ssa.GCNode {
|
2016-09-14 10:01:05 -07:00
|
|
|
n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list
|
2015-08-24 02:16:19 -07:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2017-03-17 07:49:22 -07:00
|
|
|
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
|
cmd/compile: better job of naming compound types
Compound AUTO types weren't named previously. That was because live
variable analysis (plive.go) doesn't handle spilling to compound types.
It can't handle them because there is no valid place to put VARDEFs when
regalloc is spilling compound types.
compound types = multiword builtin types: complex, string, slice, and
interface.
Instead, we split named AUTOs into individual one-word variables. For
example, a string s gets split into a byte ptr s.ptr and an integer
s.len. Those two variables can be spilled to / restored from
independently. As a result, live variable analysis can handle them
because they are one-word objects.
This CL will change how AUTOs are described in DWARF information.
Consider the code:
func f(s string, i int) int {
x := s[i:i+5]
g()
return lookup(x)
}
The old compiler would spill x to two consecutive slots on the stack,
both named x (at offsets 0 and 8). The new compiler spills the pointer
of x to a slot named x.ptr. It doesn't spill x.len at all, as it is a
constant (5) and can be rematerialized for the call to lookup.
So compound objects may not be spilled in their entirety, and even if
they are they won't necessarily be contiguous. Such is the price of
optimization.
Re-enable live variable analysis tests. One test remains disabled; it
fails because of #14904.
Change-Id: I8ef2b5ab91e43a0d2136bfc231c05d100ec0b801
Reviewed-on: https://go-review.googlesource.com/21233
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2016-03-28 11:25:17 -07:00
|
|
|
n := name.N.(*Node)
|
2017-03-19 09:51:22 +01:00
|
|
|
ptrType := typPtr(Types[TUINT8])
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
lenType := Types[TINT]
|
2017-02-27 19:56:38 +02:00
|
|
|
if n.Class == PAUTO && !n.Addrtaken() {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
// Split this string up into two separate variables.
|
2017-03-21 20:58:00 -07:00
|
|
|
p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
|
|
|
|
l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
// Return the two parts of the larger variable.
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
|
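Read concretely (illustrative): for a non-address-taken auto `var s string`, SplitString hands back two fresh one-word slots, s.ptr and s.len; for any other string location it returns the pointer word and the length word of the original slot.

//	auto case:  {N: s.ptr, Type: *uint8, Off: 0}, {N: s.len, Type: int, Off: 0}
//	otherwise:  {N: s, Type: *uint8, Off: name.Off}, {N: s, Type: int, Off: name.Off + Widthptr}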
2017-03-17 07:49:22 -07:00
|
|
|
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
n := name.N.(*Node)
|
2017-03-19 09:51:22 +01:00
|
|
|
t := typPtr(Types[TUINT8])
|
2017-02-27 19:56:38 +02:00
|
|
|
if n.Class == PAUTO && !n.Addrtaken() {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
// Split this interface up into two separate variables.
|
|
|
|
f := ".itab"
|
2016-04-01 13:36:24 -07:00
|
|
|
if n.Type.IsEmptyInterface() {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
f = ".type"
|
|
|
|
}
|
2017-03-21 20:58:00 -07:00
|
|
|
c := e.namedAuto(n.Sym.Name+f, t, n.Pos)
|
|
|
|
d := e.namedAuto(n.Sym.Name+".data", t, n.Pos)
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
// Return the two parts of the larger variable.
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
|
2017-03-17 07:49:22 -07:00
|
|
|
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
n := name.N.(*Node)
|
2017-03-19 09:51:22 +01:00
|
|
|
ptrType := typPtr(name.Type.ElemType().(*Type))
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
lenType := Types[TINT]
|
2017-02-27 19:56:38 +02:00
|
|
|
if n.Class == PAUTO && !n.Addrtaken() {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
// Split this slice up into three separate variables.
|
2017-03-21 20:58:00 -07:00
|
|
|
p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
|
|
|
|
l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
|
|
|
|
c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos)
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
// Return the three parts of the larger variable.
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
|
|
|
|
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
|
|
|
|
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
}
|
|
|
|
|
2017-03-17 07:49:22 -07:00
|
|
|
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
n := name.N.(*Node)
|
|
|
|
s := name.Type.Size() / 2
|
|
|
|
var t *Type
|
|
|
|
if s == 8 {
|
|
|
|
t = Types[TFLOAT64]
|
|
|
|
} else {
|
|
|
|
t = Types[TFLOAT32]
|
|
|
|
}
|
2017-02-27 19:56:38 +02:00
|
|
|
if n.Class == PAUTO && !n.Addrtaken() {
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
// Split this complex up into two separate variables.
|
2017-03-21 20:58:00 -07:00
|
|
|
c := e.namedAuto(n.Sym.Name+".real", t, n.Pos)
|
|
|
|
d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos)
|
2016-04-29 09:02:27 -07:00
|
|
|
return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
|
cmd/compile: better job of naming compound types
2016-03-28 11:25:17 -07:00
|
|
|
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

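A rough standalone model of the split described in the commit message above (the slot type and splitString helper below are invented for illustration and are not the compiler's API): a splittable string auto becomes two independent zero-offset variables named x.ptr and x.len, while a non-splittable one is addressed as two halves of its original slot. SplitComplex above does the same for the real and imaginary halves of a complex value.

package main

import "fmt"

// slot is a toy stand-in for an ssa.LocalSlot: a variable plus a byte offset.
type slot struct {
	name string
	off  int64
}

// splitString models the decomposition of a named string auto. If the auto
// can be split (PAUTO and not address-taken), each word becomes its own
// zero-offset variable; otherwise the two words address the parent slot at
// offsets 0 and 8 (assuming an 8-byte pointer/word, as on 64-bit targets).
func splitString(name string, off int64, splittable bool) (ptr, length slot) {
	if splittable {
		return slot{name + ".ptr", 0}, slot{name + ".len", 0}
	}
	return slot{name, off}, slot{name, off + 8}
}

func main() {
	p, l := splitString("x", 16, true)
	fmt.Println(p, l) // {x.ptr 0} {x.len 0}
	p, l = splitString("x", 16, false)
	fmt.Println(p, l) // {x 16} {x 24}
}
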
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
[dev.ssa] cmd/compile: decompose 64-bit integer on ARM
Introduce dec64 rules to (generically) decompose 64-bit integers on
32-bit architectures. A 64-bit integer is composed/decomposed with
Int64Make/Hi/Lo ops, as for complex types.
The idea of dealing with Add64 is the following:
(Add64 (Int64Make xh xl) (Int64Make yh yl))
->
(Int64Make
(Add32withcarry xh yh (Select0 (Add32carry xl yl)))
(Select1 (Add32carry xl yl)))
where Add32carry returns a tuple (flags,uint32). Select0 and Select1
read the first and the second component of the tuple, respectively.
The two Add32carry will be CSE'd.
Similarly for multiplication, Mul32uhilo returns a tuple (hi, lo).
Also add support for KeepAlive, to fix the build after merge.
Tests addressed_ssa.go, array_ssa.go, break_ssa.go, chan_ssa.go,
cmp_ssa.go, ctl_ssa.go, map_ssa.go, and string_ssa.go in
cmd/compile/internal/gc/testdata passed.
Progress on SSA for ARM. Still not complete.
Updates #15365.
Change-Id: I7867c76785a456312de5d8398a6b3f7ca5a4f7ec
Reviewed-on: https://go-review.googlesource.com/23213
Reviewed-by: Keith Randall <khr@golang.org>
2016-05-18 18:14:36 -04:00
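
As an analogy only (the compiler's Add32carry/Add32withcarry are SSA ops, not Go functions), the Add64 rewrite above can be mimicked with math/bits: add the low halves first, then feed the carry into the addition of the high halves.

package main

import (
	"fmt"
	"math/bits"
)

// add64 adds two 64-bit values given as (hi, lo) 32-bit halves, mirroring the
// dec64 rule: the low halves are added first and the carry out feeds the
// addition of the high halves.
func add64(xh, xl, yh, yl uint32) (zh, zl uint32) {
	zl, carry := bits.Add32(xl, yl, 0)
	zh, _ = bits.Add32(xh, yh, carry)
	return
}

func main() {
	x, y := uint64(0xFFFFFFFF), uint64(1)
	zh, zl := add64(uint32(x>>32), uint32(x), uint32(y>>32), uint32(y))
	fmt.Printf("%#x\n", uint64(zh)<<32|uint64(zl)) // 0x100000000
}
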
	n := name.N.(*Node)
	var t *Type
	if name.Type.IsSigned() {
		t = Types[TINT32]
	} else {
		t = Types[TUINT32]
	}
	if n.Class == PAUTO && !n.Addrtaken() {
		// Split this int64 up into two separate variables.
		h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos)
		l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32], n.Pos)
		return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
	}
	// Return the two parts of the larger variable.
	if thearch.LinkArch.ByteOrder == binary.BigEndian {
		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4}
	}
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
}

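The byte-order branch above can be sanity-checked with encoding/binary (a standalone check, not compiler code): within an 8-byte slot, the high 32-bit word sits at offset 0 on a big-endian target and at offset 4 on a little-endian one, which is exactly the pair of offsets SplitInt64 returns.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf [8]byte
	x := uint64(0x1122334455667788)

	// Big-endian layout: the high word occupies bytes 0-3.
	binary.BigEndian.PutUint64(buf[:], x)
	fmt.Printf("big-endian:    off 0 = %#x, off 4 = %#x\n",
		binary.BigEndian.Uint32(buf[0:4]), binary.BigEndian.Uint32(buf[4:8]))

	// Little-endian layout: the high word occupies bytes 4-7.
	binary.LittleEndian.PutUint64(buf[:], x)
	fmt.Printf("little-endian: off 0 = %#x, off 4 = %#x\n",
		binary.LittleEndian.Uint32(buf[0:4]), binary.LittleEndian.Uint32(buf[4:8]))
}
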
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken() {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos)
		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

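The fallback return above is plain offset arithmetic: the slot for field i sits at the parent's offset plus the field's offset within the struct. A standalone illustration (the pair struct and the base offset are invented for this example; the exact field offset depends on the target's alignment rules):

package main

import (
	"fmt"
	"unsafe"
)

type pair struct {
	a int32
	b int64
}

func main() {
	var p pair
	base := int64(16) // assumed stack offset of the parent slot
	// On a typical 64-bit target, field b is at offset 8, so its slot is at 16+8 = 24.
	fmt.Println(base + int64(unsafe.Offsetof(p.b)))
}
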
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
	n := name.N.(*Node)
	at := name.Type
	if at.NumElem() != 1 {
		Fatalf("bad array size")
	}
	et := at.ElemType()
	if n.Class == PAUTO && !n.Addrtaken() {
		x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos)
		return ssa.LocalSlot{N: x, Type: et, Off: 0}
	}
	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}

func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
cmd/compile: de-virtualize interface calls
With this change, code like
h := sha1.New()
h.Write(buf)
sum := h.Sum()
gets compiled into static calls rather than
interface calls, because the compiler is able
to prove that 'h' is really a *sha1.digest.
The InterCall re-write rule hits a few dozen times
during make.bash, and hundreds of times during all.bash.
The most common pattern identified by the compiler
is a constructor like
func New() Interface { return &impl{...} }
where the constructor gets inlined into the caller,
and the result is used immediately. Examples include
{sha1,md5,crc32,crc64,...}.New, base64.NewEncoder,
base64.NewDecoder, errors.New, net.Pipe, and so on.
Some existing benchmarks that change on darwin/amd64:
Crc64/ISO4KB-8 2.67µs ± 1% 2.66µs ± 0% -0.36% (p=0.015 n=10+10)
Crc64/ISO1KB-8 694ns ± 0% 690ns ± 1% -0.59% (p=0.001 n=10+10)
Adler32KB-8 473ns ± 1% 471ns ± 0% -0.39% (p=0.010 n=10+9)
On architectures like amd64, the reduction in code size
appears to contribute more to benchmark improvements than just
removing the indirect call, since that branch gets predicted
accurately when called in a loop.
Updates #19361
Change-Id: I57d4dc21ef40a05ec0fbd55a9bb0eb74cdc67a3d
Reviewed-on: https://go-review.googlesource.com/38139
Run-TryBot: Philip Hofer <phofer@umich.edu>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2017-03-13 15:03:17 -07:00
	return itabsym(it, offset)
}

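The constructor pattern described in the commit message above looks like the following sketch (Hasher, digest, and New are invented for illustration): once New is inlined into the caller, the compiler can prove that h is really a *digest, so h.Sum() can be rewritten from an indirect interface call into a static call.

package main

import "fmt"

type Hasher interface {
	Sum() string
}

type digest struct{ s string }

func (d *digest) Sum() string { return d.s }

// New has the shape the rewrite rule targets: it returns an interface, but
// the concrete type behind that interface is known at the return site.
func New() Hasher { return &digest{s: "cafebabe"} }

func main() {
	h := New() // after inlining, h is provably a *digest
	fmt.Println(h.Sum())
}
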
// namedAuto returns a new AUTO variable with the given name and type.
// These are exposed to the debugger.
func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode {
	t := typ.(*Type)
	s := &Sym{Name: name, Pkg: localpkg}

	n := new(Node)
	n.Name = new(Name)
	n.Op = ONAME
	n.Pos = pos
	n.Orig = n

	s.Def = n
	s.Def.SetUsed(true)
	n.Sym = s
	n.Type = t
	n.Class = PAUTO
	n.SetAddable(true)
	n.Esc = EscNever
	n.Name.Curfn = e.curfn
	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)

	dowidth(t)
	return n
}

func (e *ssafn) CanSSA(t ssa.Type) bool {
	return canSSAType(t.(*Type))
}

func (e *ssafn) Line(pos src.XPos) string {
	return linestr(pos)
}

// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}

func (e *ssafn) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
	lineno = pos
	Fatalf(msg, args...)
}

// Error reports a compiler error but keeps going.
func (e *ssafn) Error(pos src.XPos, msg string, args ...interface{}) {
	yyerrorl(pos, msg, args...)
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
	Warnl(pos, fmt_, args...)
}

func (e *ssafn) Debug_checknil() bool {
	return Debug_checknil != 0
}

func (e *ssafn) Debug_wb() bool {
	return Debug_wb != 0
}

func (e *ssafn) UseWriteBarrier() bool {
	return use_writebarrier
}

func (e *ssafn) Syslook(name string) *obj.LSym {
	return Linksym(syslook(name).Sym)
}

func (n *Node) Typ() ssa.Type {
	return n.Type
}