// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Register allocation.
//
// We use a version of a linear scan register allocator. We treat the
// whole function as a single long basic block and run through
// it using a greedy register allocator. Then all merge edges
// (those targeting a block with len(Preds)>1) are processed to
// shuffle data into the place that the target of the edge expects.
//
// The greedy allocator moves values into registers just before they
// are used, spills registers only when necessary, and spills the
// value whose next use is farthest in the future.
//
// The register allocator requires that a block is not scheduled until
// at least one of its predecessors has been scheduled. The most recent
// such predecessor provides the starting register state for a block.
//
// It also requires that there are no critical edges (critical =
// comes from a block with >1 successor and goes to a block with >1
// predecessor). This makes it easy to add fixup code on merge edges -
// the source of a merge edge has only one successor, so we can add
// fixup code to the end of that block.
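//
// For example (an illustrative CFG, not code from this file): if b1 and b2
// each end with an unconditional jump to a merge block b3,
//
//	b1: ... goto b3        b2: ... goto b3
//	b3: ...                (len(b3.Preds) == 2)
//
// then b1->b3 and b2->b3 are merge edges, and because b1 and b2 have no other
// successors, the moves needed to establish b3's expected register state can
// simply be appended to the ends of b1 and b2.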

// Spilling
//
// During the normal course of the allocator, we might throw a still-live
// value out of all registers. When that value is subsequently used, we must
// load it from a slot on the stack. We must also issue an instruction to
// initialize that stack location with a copy of v.
//
// pre-regalloc:
//	(1) v = Op ...
//	(2) x = Op ...
//	(3) ... = Op v ...
//
// post-regalloc:
//	(1) v = Op ... : AX    // computes v, store result in AX
//	    s = StoreReg v     // spill v to a stack slot
//	(2) x = Op ... : AX    // some other op uses AX
//	    c = LoadReg s : CX // restore v from stack slot
//	(3) ... = Op c ...     // use the restored value
//
// Allocation occurs normally until we reach (3) and we realize we have
// a use of v and it isn't in any register. At that point, we allocate
// a spill (a StoreReg) for v. We can't determine the correct place for
// the spill at this point, so we allocate the spill as blockless initially.
// The restore is then generated to load v back into a register so it can
// be used. Subsequent uses of v will use the restored value c instead.
//
// What remains is the question of where to schedule the spill.
// During allocation, we keep track of the dominator of all restores of v.
// The spill of v must dominate that block. The spill must also be issued at
// a point where v is still in a register.
//
// To find the right place, start at b, the block which dominates all restores.
//	- If b is v.Block, then issue the spill right after v.
//	  It is known to be in a register at that point, and dominates any restores.
//	- Otherwise, if v is in a register at the start of b,
//	  put the spill of v at the start of b.
//	- Otherwise, set b = immediate dominator of b, and repeat.
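//
// An illustrative sketch of that walk (pseudocode only, not literal code from
// this file):
//
//	b := the block dominating all restores of v
//	for {
//		if b == v.Block {
//			place the spill immediately after v // v is in a register there
//			break
//		}
//		if v is in a register at the start of b {
//			place the spill at the start of b
//			break
//		}
//		b = immediate dominator of b
//	}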
//
// Phi values are special, as always. We define two kinds of phis, those
// where the merge happens in a register (a "register" phi) and those where
// the merge happens in a stack location (a "stack" phi).
//
// A register phi must have the phi and all of its inputs allocated to the
// same register. Register phis are spilled similarly to regular ops.
//
// A stack phi must have the phi and all of its inputs allocated to the same
// stack location. Stack phis start out life already spilled - each phi
// input must be a store (using StoreReg) at the end of the corresponding
// predecessor block.
//	b1: y = ... : AX        b2: z = ... : BX
//	    y2 = StoreReg y         z2 = StoreReg z
//	    goto b3                 goto b3
//	b3: x = phi(y2, z2)
// The stack allocator knows that StoreReg args of stack-allocated phis
// must be allocated to the same stack slot as the phi that uses them.
// x is now a spilled value and a restore must appear before its first use.

// TODO

// Use an affinity graph to mark two values which should use the
// same register. This affinity graph will be used to prefer certain
// registers for allocation. This affinity helps eliminate moves that
// are required for phi implementations and helps generate allocations
// for 2-register architectures.

// Note: regalloc generates a not-quite-SSA output. If we have:
//
//	b1: x = ... : AX
//	    x2 = StoreReg x
//	    ... AX gets reused for something else ...
//	    if ... goto b3 else b4
//
//	b3: x3 = LoadReg x2 : BX        b4: x4 = LoadReg x2 : CX
//	    ... use x3 ...                  ... use x4 ...
//
//	b2: ... use x3 ...
//
// If b3 is the primary predecessor of b2, then we use x3 in b2 and
// add an x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
// will have no use (so don't run deadcode after regalloc!).
// TODO: maybe we should introduce these extra phis?

package ssa

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
"cmd/compile/internal/types"
|
2016-12-06 17:08:06 -08:00
|
|
|
"cmd/internal/src"
|
"cmd/internal/sys"
|
2015-08-11 12:51:33 -07:00
|
|
|
"fmt"
|
2021-04-15 23:05:49 -04:00
|
|
|
"internal/buildcfg"
|
2023-07-12 15:31:25 -07:00
|
|
|
"math"
|
2018-06-21 20:43:10 +03:00
|
|
|
"math/bits"
|
2015-08-11 12:51:33 -07:00
|
|
|
"unsafe"
|
|
|
|
|
)
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-03-10 17:52:57 -06:00
|
|
|
const (
|
2016-03-21 11:32:04 -04:00
|
|
|
moveSpills = iota
|
|
|
|
|
logSpills
|
2016-03-10 17:52:57 -06:00
|
|
|
regDebug
|
|
|
|
|
stackDebug
|
|
|
|
|
)
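
// These constants serve as debug-output thresholds compared against
// s.f.pass.debug; for example, the tracing below that is guarded by
// "if s.f.pass.debug > regDebug" only fires when this pass runs at a debug
// level above regDebug.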

// distance is a measure of how far into the future values are used.
// distance is measured in units of instructions.
const (
	likelyDistance   = 1
	normalDistance   = 10
	unlikelyDistance = 100
)

// regalloc performs register allocation on f. It sets f.RegAlloc
// to the resulting allocation.
func regalloc(f *Func) {
	var s regAllocState
	s.init(f)
	s.regalloc(f)
	s.close()
}

type register uint8

const noRegister register = 255

// For bulk initializing
var noRegisters [32]register = [32]register{
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
}

// A regMask encodes a set of machine registers.
// TODO: regMask -> regSet?
type regMask uint64

func (m regMask) String() string {
	s := ""
	for r := register(0); m != 0; r++ {
		if m>>r&1 == 0 {
			continue
		}
		m &^= regMask(1) << r
		if s != "" {
			s += " "
		}
		s += fmt.Sprintf("r%d", r)
	}
	return s
}

func (m regMask) contains(r register) bool {
	return m>>r&1 != 0
}

func (s *regAllocState) RegMaskString(m regMask) string {
	str := ""
	for r := register(0); m != 0; r++ {
		if m>>r&1 == 0 {
			continue
		}
		m &^= regMask(1) << r
		if str != "" {
			str += " "
		}
		str += s.registers[r].String()
	}
	return str
}

// countRegs returns the number of set bits in the register mask.
func countRegs(r regMask) int {
	return bits.OnesCount64(uint64(r))
}

// pickReg picks an arbitrary register from the register mask.
func pickReg(r regMask) register {
	if r == 0 {
		panic("can't pick a register from an empty set")
	}
	// pick the lowest one
	return register(bits.TrailingZeros64(uint64(r)))
}
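
// regMaskExample is an illustrative sketch added for exposition only; it is
// not used by the allocator. It shows how a regMask is consumed as a bitmask
// by repeatedly taking the lowest set register with pickReg and clearing it,
// which is the idiom used throughout this file.
func regMaskExample(m regMask) []register {
	rs := make([]register, 0, countRegs(m))
	for m != 0 {
		r := pickReg(m)       // lowest register in the set
		m &^= regMask(1) << r // remove it from the set
		rs = append(rs, r)
	}
	return rs
}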

type use struct {
	// distance from start of the block to a use of a value
	//   dist == 0                used by first instruction in block
	//   dist == len(b.Values)-1  used by last instruction in block
	//   dist == len(b.Values)    used by block's control value
	//   dist >  len(b.Values)    used by a subsequent block
	dist int32
	pos  src.XPos // source position of the use
	next *use     // linked list of uses of a value in nondecreasing dist order
}
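
// For example (illustrative): in a block b with len(b.Values) == 4, a use by
// b's first value has dist 0, a use by b's last value has dist 3, a use by
// b's control value has dist 4, and a use in a later block has dist > 4.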

// A valState records the register allocation state for a (pre-regalloc) value.
type valState struct {
	regs              regMask // the set of registers holding a Value (usually just one)
	uses              *use    // list of uses in this block
	spill             *Value  // spilled copy of the Value (if any)
	restoreMin        int32   // minimum of all restores' blocks' sdom.entry
	restoreMax        int32   // maximum of all restores' blocks' sdom.exit
	needReg           bool    // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags()
	rematerializeable bool    // cached value of v.rematerializeable()
}

type regState struct {
	v *Value // Original (preregalloc) Value stored in this register.
	c *Value // A Value equal to v which is currently in a register. Might be v or a copy of it.
	// If a register is unused, v==c==nil
}

type regAllocState struct {
	f *Func

	sdom        SparseTree
	registers   []Register
	numRegs     register
	SPReg       register
	SBReg       register
	GReg        register
	ZeroIntReg  register
	allocatable regMask

	// live values at the end of each block. live[b.ID] is a list of value IDs
	// which are live at the end of b, together with a count of how many instructions
	// forward to the next use.
	live [][]liveInfo
	// desired register assignments at the end of each block.
	// Note that this is a static map computed before allocation occurs. Dynamic
	// register desires (from partially completed allocations) will trump
	// this information.
	desired []desiredState

	// current state of each (preregalloc) Value
	values []valState

	// ID of SP, SB values
	sp, sb ID

	// For each Value, map from its value ID back to the
	// preregalloc Value it was derived from.
	orig []*Value

	// current state of each register.
	// Includes only registers in allocatable.
	regs []regState

	// registers that contain values which can't be kicked out
	nospill regMask

	// mask of registers currently in use
	used regMask

	// mask of registers used since the start of the current block
	usedSinceBlockStart regMask

	// mask of registers used in the current instruction
	tmpused regMask

	// current block we're working on
	curBlock *Block

	// cache of use records
	freeUseRecords *use

	// endRegs[blockid] is the register state at the end of each block.
	// encoded as a set of endReg records.
	endRegs [][]endReg

	// startRegs[blockid] is the register state at the start of merge blocks.
	// saved state does not include the state of phi ops in the block.
	startRegs [][]startReg

	// startRegsMask is a mask of the registers in startRegs[curBlock.ID].
	// Registers dropped from startRegsMask are later synchronized back to
	// startRegs by dropping from there as well.
	startRegsMask regMask

	// spillLive[blockid] is the set of live spills at the end of each block
	spillLive [][]ID

	// a set of copies we generated to move things around, and
	// whether it is used in shuffle. Unused copies will be deleted.
	copies map[*Value]bool

	loopnest *loopnest

	// choose a good order in which to visit blocks for allocation purposes.
	visitOrder []*Block

	// blockOrder[b.ID] corresponds to the index of block b in visitOrder.
	blockOrder []int32

	// whether to insert instructions that clobber dead registers at call sites
	doClobber bool

	// For each instruction index in a basic block, the index of the next call
	// at or after that instruction index.
	// If there is no next call, returns maxInt32.
	// nextCall for a call instruction points to itself.
	// (Indexes and results are pre-regalloc.)
	nextCall []int32

	// Index of the instruction we're currently working on.
	// Index is expressed in terms of the pre-regalloc b.Values list.
	curIdx int
}

type endReg struct {
	r register
	v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
	c *Value // cached version of the value
}

type startReg struct {
	r   register
	v   *Value   // pre-regalloc value needed in this register
	c   *Value   // cached version of the value
	pos src.XPos // source position of use of this register
}

// freeReg frees up register r. Any current user of r is kicked out.
func (s *regAllocState) freeReg(r register) {
	if !s.allocatable.contains(r) && !s.isGReg(r) {
		return
	}
	v := s.regs[r].v
	if v == nil {
		s.f.Fatalf("tried to free an already free register %d\n", r)
	}

	// Mark r as unused.
	if s.f.pass.debug > regDebug {
		fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c)
	}
	s.regs[r] = regState{}
	s.values[v.ID].regs &^= regMask(1) << r
	s.used &^= regMask(1) << r
}

// freeRegs frees up all registers listed in m.
func (s *regAllocState) freeRegs(m regMask) {
	for m&s.used != 0 {
		s.freeReg(pickReg(m & s.used))
	}
}

// clobberRegs inserts instructions that clobber registers listed in m.
func (s *regAllocState) clobberRegs(m regMask) {
	m &= s.allocatable & s.f.Config.gpRegMask // only integer registers can contain pointers, only clobber them
	for m != 0 {
		r := pickReg(m)
		m &^= 1 << r
		x := s.curBlock.NewValue0(src.NoXPos, OpClobberReg, types.TypeVoid)
		s.f.setHome(x, &s.registers[r])
	}
}

// setOrig records that c's original value is the same as
// v's original value.
func (s *regAllocState) setOrig(c *Value, v *Value) {
	if int(c.ID) >= cap(s.orig) {
		x := s.f.Cache.allocValueSlice(int(c.ID) + 1)
		copy(x, s.orig)
		s.f.Cache.freeValueSlice(s.orig)
		s.orig = x
	}
	for int(c.ID) >= len(s.orig) {
		s.orig = append(s.orig, nil)
	}
	if s.orig[c.ID] != nil {
		s.f.Fatalf("orig value set twice %s %s", c, v)
	}
	s.orig[c.ID] = s.orig[v.ID]
}

// assignReg assigns register r to hold c, a copy of v.
// r must be unused.
func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
	if s.f.pass.debug > regDebug {
		fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c)
	}
	// Allocate v to r.
	s.values[v.ID].regs |= regMask(1) << r
	s.f.setHome(c, &s.registers[r])

	// Allocate r to v.
	if !s.allocatable.contains(r) && !s.isGReg(r) {
		return
	}
	if s.regs[r].v != nil {
		s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
	}
	s.regs[r] = regState{v, c}
	s.used |= regMask(1) << r
}

// allocReg chooses a register from the set of registers in mask.
// If there is no unused register, a Value will be kicked out of
// a register to make room.
func (s *regAllocState) allocReg(mask regMask, v *Value) register {
	if v.OnWasmStack {
		return noRegister
	}

	mask &= s.allocatable
	mask &^= s.nospill
	if mask == 0 {
		s.f.Fatalf("no register available for %s", v.LongString())
	}

	// Pick an unused register if one is available.
	if mask&^s.used != 0 {
		r := pickReg(mask &^ s.used)
		s.usedSinceBlockStart |= regMask(1) << r
		return r
	}

	// Pick a value to spill. Spill the value with the
	// farthest-in-the-future use.
	// TODO: Prefer registers with already spilled Values?
	// TODO: Modify preference using affinity graph.
	// TODO: if a single value is in multiple registers, spill one of them
	// before spilling a value in just a single register.

	// Find a register to spill. We spill the register containing the value
	// whose next use is as far in the future as possible.
	// https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
	var r register
	maxuse := int32(-1)
	for t := register(0); t < s.numRegs; t++ {
		if mask>>t&1 == 0 {
			continue
		}
		v := s.regs[t].v
		if n := s.values[v.ID].uses.dist; n > maxuse {
			// v's next use is farther in the future than any value
			// we've seen so far. A new best spill candidate.
			r = t
			maxuse = n
		}
	}
	if maxuse == -1 {
		s.f.Fatalf("couldn't find register to spill")
	}

	if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm {
		// TODO(neelance): In theory this should never happen, because all wasm registers are equal.
		// So if there is still a free register, the allocation should have picked that one in the first place instead of
		// trying to kick some other value out. In practice, this case does happen and it breaks the stack optimization.
		s.freeReg(r)
		return r
	}

	// Try to move it around before kicking out, if there is a free register.
	// We generate a Copy and record it. It will be deleted if never used.
	v2 := s.regs[r].v
	m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r)
	if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 {
cmd/compile/internal/ssa: drop overwritten regalloc basic block input requirements
For the following description, consider the following basic block graph:
b1 ───┐┌──── b2
││
││
▼▼
b3
For register allocator transitions between basic blocks, there are two
key passes (significant paraphrasing):
First, each basic block is visited in some predetermined visit order.
This is the core visitOrder range loop in regAllocState.regalloc. The
specific ordering heuristics aren't important here, except that the
order guarantees that when visiting a basic block at least one of its
predecessors has already been visited.
Upon visiting a basic block, that block sets its expected starting
register state (regAllocState.startRegs) based on the ending register
state (regAlloc.State.endRegs) of one of its predecessors. (How it
chooses which predecessor to use is not important here.)
From that starting state, registers are assigned for all values in the
block, ultimately resulting in some ending register state.
After all blocks have been visited, the shuffle pass
(regAllocState.shuffle) ensures that for each edge, endRegs of the
predecessor == startRegs of the successor. That is, it makes sure that
the startRegs assumptions actually hold true for each edge. It does this
by adding moves to the end of the predecessor block to place values in
the expected register for the successor block. These may be moves from
other registers, or from memory if the value is spilled.
Now on to the actual problem:
Assume that b1 places some value v1 into register R10, and thus ends
with endRegs containing R10 = v1.
When b3 is visited, it selects b1 as its model predecessor and sets
startRegs with R10 = v1.
b2 does not have v1 in R10, so later in the shuffle pass, we will add a
move of v1 into R10 to the end of b2 to ensure it is available for b3.
This is all perfectly fine and exactly how things should work.
Now suppose that b3 does not use v1. It does need to use some other
value v2, which is not currently in a register. When assigning v2 to a
register, it finds all registers are already in use and it needs to dump
a value. Ultimately, it decides to dump v1 from R10 and replace it with
v2.
This is fine, but it has downstream effects on shuffle in b2. b3's
startRegs still state that R10 = v1, so b2 will add a move to R10 even
though b3 will unconditionally overwrite it. That is, the move at the end
of b2 is completely useless and can result in code like:
// end of b2
MOV n(SP), R10 // R10 = v1 <-- useless
// start of b3
MOV m(SP), R10 // R10 = v2
This is precisely what happened in #58298.
This CL addresses this problem by dropping registers from startRegs if
they are never used in the basic block prior to getting dumped. This
allows the shuffle pass to avoid placing those useless values into the
register.
There is a significant limitation to this CL, which is that it only
impacts the immediate predecessors of an overwriting block. We can
discuss this by zooming out a bit on the previous graph:
b4 ───┐┌──── b5
││
││
▼▼
b1 ───┐┌──── b2
││
││
▼▼
b3
Here we have the same graph, except we can see the two predecessors of
b1.
Now suppose that rather than b1 assigning R10 = v1 as above, the
assignment is done in b4. b1 has startRegs R10 = v1, doesn't use the
value at all, and simply passes it through to endRegs R10 = v1.
Now the shuffle pass will require both b2 and b5 to add a move to
assign R10 = v1, because that is specified in their successor
startRegs.
With this CL, b3 drops R10 = v1 from startRegs, but there is no
backwards propagation, so b1 still has R10 = v1 in startRegs, and b5
still needs to add a useless move.
Extending this CL with such propagation may significantly increase the
number of useless moves we can remove, though it will add complexity to
maintenance and could potentially impact build performance depending on
how efficiently we could implement the propagation (something I haven't
considered carefully).
As-is, this optimization does not impact much code. In bent .text size
geomean is -0.02%. In the container/heap test binary, 18 of ~2500
functions are impacted by this CL. Bent and sweet do not show a
noticeable performance impact one way or another; however, #58298 does
show a case where this can have impact if the useless instructions end
up in the hot path of a tight loop.
For #58298.
Change-Id: I2fcef37c955159d068fa0725f995a1848add8a5f
Reviewed-on: https://go-review.googlesource.com/c/go/+/471158
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
2023-02-21 13:20:49 -05:00
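A minimal stand-alone Go sketch of the mask bookkeeping this change describes; regMask and the field names mirror the ones used in this file, but the struct is a toy, not the allocator's real state:
package main

import "fmt"

type regMask uint64

// toyBlockState models just the two masks involved in the optimization above.
type toyBlockState struct {
	startRegsMask       regMask // registers the block expects to hold values on entry
	usedSinceBlockStart regMask // registers actually read since the block started
}

// evict models dumping register r for some new value: if r was never used
// since the block started, the entry requirement for r can be dropped, so the
// shuffle pass will not add a useless move in the predecessors.
func (s *toyBlockState) evict(r uint) {
	if s.usedSinceBlockStart&(regMask(1)<<r) == 0 {
		s.startRegsMask &^= regMask(1) << r
	}
}

func main() {
	s := &toyBlockState{startRegsMask: 1 << 10} // entry state says R10 = v1
	s.evict(10)                                 // b3 overwrites R10 without ever reading it
	fmt.Printf("startRegsMask after evict: %#x\n", s.startRegsMask) // 0x0: R10 dropped
}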
|
|
|
s.usedSinceBlockStart |= regMask(1) << r
|
2016-09-23 09:15:51 -04:00
|
|
|
r2 := pickReg(m)
|
2016-12-07 18:14:35 -08:00
|
|
|
c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.copies[c] = false
|
|
|
|
|
if s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2])
|
2016-09-23 09:15:51 -04:00
|
|
|
}
|
|
|
|
|
s.setOrig(c, v2)
|
|
|
|
|
s.assignReg(r2, v2, c)
|
|
|
|
|
}
|
2023-02-21 13:20:49 -05:00
|
|
|
|
|
|
|
|
// If the evicted register isn't used between the start of the block
|
|
|
|
|
// and now then there is no reason to even request it on entry. We can
|
|
|
|
|
// drop from startRegs in that case.
|
2023-03-20 13:01:14 -07:00
|
|
|
if s.usedSinceBlockStart&(regMask(1)<<r) == 0 {
|
|
|
|
|
if s.startRegsMask&(regMask(1)<<r) != 0 {
|
2023-02-21 13:20:49 -05:00
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("dropped from startRegs: %s\n", &s.registers[r])
|
|
|
|
|
}
|
|
|
|
|
s.startRegsMask &^= regMask(1) << r
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
s.freeReg(r)
|
2023-02-21 13:20:49 -05:00
|
|
|
s.usedSinceBlockStart |= regMask(1) << r
|
2015-08-11 12:51:33 -07:00
|
|
|
return r
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-07 14:45:46 -05:00
|
|
|
// makeSpill returns a Value which represents the spilled value of v.
|
|
|
|
|
// b is the block in which the spill is used.
|
|
|
|
|
func (s *regAllocState) makeSpill(v *Value, b *Block) *Value {
|
|
|
|
|
vi := &s.values[v.ID]
|
|
|
|
|
if vi.spill != nil {
|
|
|
|
|
// Final block not known - keep track of subtree where restores reside.
|
2024-09-03 13:29:42 -07:00
|
|
|
vi.restoreMin = min(vi.restoreMin, s.sdom[b.ID].entry)
|
|
|
|
|
vi.restoreMax = max(vi.restoreMax, s.sdom[b.ID].exit)
|
2017-03-07 14:45:46 -05:00
|
|
|
return vi.spill
|
|
|
|
|
}
|
|
|
|
|
// Make a spill for v. We don't know where we want
|
|
|
|
|
// to put it yet, so we leave it blockless for now.
|
|
|
|
|
spill := s.f.newValueNoBlock(OpStoreReg, v.Type, v.Pos)
|
|
|
|
|
// We also don't know what the spill's arg will be.
|
|
|
|
|
// Leave it argless for now.
|
|
|
|
|
s.setOrig(spill, v)
|
|
|
|
|
vi.spill = spill
|
|
|
|
|
vi.restoreMin = s.sdom[b.ID].entry
|
|
|
|
|
vi.restoreMax = s.sdom[b.ID].exit
|
|
|
|
|
return spill
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// allocValToReg allocates v to a register selected from regMask and
|
|
|
|
|
// returns the register copy of v. Any previous user is kicked out and spilled
|
|
|
|
|
// (if necessary). Load code is added at the current pc. If nospill is set the
|
|
|
|
|
// allocated register is marked nospill so the assignment cannot be
|
|
|
|
|
// undone until the caller allows it by clearing nospill. Returns a
|
|
|
|
|
// *Value which is either v or a copy of v allocated to the chosen register.
|
2016-12-15 17:17:01 -08:00
|
|
|
func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos src.XPos) *Value {
|
2018-03-09 00:14:58 +01:00
|
|
|
if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm && v.rematerializeable() {
|
|
|
|
|
c := v.copyIntoWithXPos(s.curBlock, pos)
|
|
|
|
|
c.OnWasmStack = true
|
|
|
|
|
s.setOrig(c, v)
|
|
|
|
|
return c
|
|
|
|
|
}
|
|
|
|
|
if v.OnWasmStack {
|
|
|
|
|
return v
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
vi := &s.values[v.ID]
|
2018-03-02 20:33:15 -05:00
|
|
|
pos = pos.WithNotStmt()
|
2015-08-11 12:51:33 -07:00
|
|
|
// Check if v is already in a requested register.
|
|
|
|
|
if mask&vi.regs != 0 {
|
2025-08-13 09:41:17 -07:00
|
|
|
mask &= vi.regs
|
|
|
|
|
r := pickReg(mask)
|
|
|
|
|
if mask.contains(s.SPReg) {
|
|
|
|
|
// Prefer the stack pointer if it is allowed.
|
|
|
|
|
// (Needed because the op might have an Aux symbol
|
|
|
|
|
// that needs SP as its base.)
|
|
|
|
|
r = s.SPReg
|
|
|
|
|
}
|
2024-11-24 15:29:56 -08:00
|
|
|
if !s.allocatable.contains(r) {
|
|
|
|
|
return v // v is in a fixed register
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
if s.regs[r].v != v || s.regs[r].c == nil {
|
|
|
|
|
panic("bad register state")
|
|
|
|
|
}
|
|
|
|
|
if nospill {
|
|
|
|
|
s.nospill |= regMask(1) << r
|
|
|
|
|
}
|
2023-02-21 13:20:49 -05:00
|
|
|
s.usedSinceBlockStart |= regMask(1) << r
|
2015-08-11 12:51:33 -07:00
|
|
|
return s.regs[r].c
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-09 00:14:58 +01:00
|
|
|
var r register
|
2019-09-08 19:36:13 +03:00
|
|
|
// If nospill is set, the value is used immediately, so it can live on the WebAssembly stack.
|
2018-03-09 00:14:58 +01:00
|
|
|
onWasmStack := nospill && s.f.Config.ctxt.Arch.Arch == sys.ArchWasm
|
|
|
|
|
if !onWasmStack {
|
|
|
|
|
// Allocate a register.
|
|
|
|
|
r = s.allocReg(mask, v)
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
|
|
|
|
|
// Allocate v to the new register.
|
|
|
|
|
var c *Value
|
|
|
|
|
if vi.regs != 0 {
|
|
|
|
|
// Copy from a register that v is already in.
|
|
|
|
|
r2 := pickReg(vi.regs)
|
2024-11-24 15:29:56 -08:00
|
|
|
var current *Value
|
|
|
|
|
if !s.allocatable.contains(r2) {
|
|
|
|
|
current = v // v is in a fixed register
|
|
|
|
|
} else {
|
|
|
|
|
if s.regs[r2].v != v {
|
|
|
|
|
panic("bad register state")
|
|
|
|
|
}
|
|
|
|
|
current = s.regs[r2].c
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2023-02-21 13:20:49 -05:00
|
|
|
s.usedSinceBlockStart |= regMask(1) << r2
|
2024-11-24 15:29:56 -08:00
|
|
|
c = s.curBlock.NewValue1(pos, OpCopy, v.Type, current)
|
2015-10-19 10:57:03 -07:00
|
|
|
} else if v.rematerializeable() {
|
|
|
|
|
// Rematerialize instead of loading from the spill location.
|
2017-07-21 12:00:38 -04:00
|
|
|
c = v.copyIntoWithXPos(s.curBlock, pos)
|
2015-08-11 12:51:33 -07:00
|
|
|
} else {
|
|
|
|
|
// Load v from its spill location.
|
2017-03-07 14:45:46 -05:00
|
|
|
spill := s.makeSpill(v, s.curBlock)
|
|
|
|
|
if s.f.pass.debug > logSpills {
|
2017-03-16 22:42:10 -07:00
|
|
|
s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2018-03-09 00:14:58 +01:00
|
|
|
|
2015-10-29 13:41:02 -07:00
|
|
|
s.setOrig(c, v)
|
2018-03-09 00:14:58 +01:00
|
|
|
|
|
|
|
|
if onWasmStack {
|
|
|
|
|
c.OnWasmStack = true
|
|
|
|
|
return c
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
s.assignReg(r, v, c)
|
2018-05-25 16:08:13 -04:00
|
|
|
if c.Op == OpLoadReg && s.isGReg(r) {
|
|
|
|
|
s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString())
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
if nospill {
|
|
|
|
|
s.nospill |= regMask(1) << r
|
|
|
|
|
}
|
|
|
|
|
return c
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-06 15:06:45 -04:00
|
|
|
// isLeaf reports whether f performs no calls other than tail calls.
|
|
|
|
|
func isLeaf(f *Func) bool {
|
|
|
|
|
for _, b := range f.Blocks {
|
|
|
|
|
for _, v := range b.Values {
|
2021-10-25 11:51:25 -04:00
|
|
|
if v.Op.IsCall() && !v.Op.IsTailCall() {
|
2021-10-25 16:48:07 -04:00
|
|
|
// tail call is not counted as it does not save the return PC or need a frame
|
2016-10-06 15:06:45 -04:00
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-05 01:07:25 +00:00
|
|
|
// needRegister reports whether v needs a register.
|
|
|
|
|
func (v *Value) needRegister() bool {
|
|
|
|
|
return !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple()
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
func (s *regAllocState) init(f *Func) {
|
2016-04-15 12:49:30 -07:00
|
|
|
s.f = f
|
cmd/compile: rearrange fields between ssa.Func, ssa.Cache, and ssa.Config
This makes ssa.Func, ssa.Cache, and ssa.Config fulfill
the roles laid out for them in CL 38160.
The only non-trivial change in this CL is how cached
values and blocks get IDs. Prior to this CL, their IDs were
assigned as part of resetting the cache, and only modified
IDs were reset. This required knowing how many values and
blocks were modified, which required a tight coupling between
ssa.Func and ssa.Config. To eliminate that coupling,
we now zero values and blocks during reset,
and assign their IDs when they are used.
Since unused values and blocks have ID == 0,
we can efficiently find the last used value/block,
to avoid zeroing everything.
Bulk zeroing is efficient, but not efficient enough
to obviate the need to avoid zeroing everything every time.
As a happy side-effect, ssa.Func.Free is no longer necessary.
DebugHashMatch and friends now belong in func.go.
They have been left in place for clarity and review.
I will move them in a subsequent CL.
Passes toolstash -cmp. No compiler performance impact.
No change in 'go test cmd/compile/internal/ssa' execution time.
Change-Id: I2eb7af58da067ef6a36e815a6f386cfe8634d098
Reviewed-on: https://go-review.googlesource.com/38167
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-03-15 11:15:13 -07:00
|
|
|
s.f.RegAlloc = s.f.Cache.locs[:0]
|
2016-03-21 22:57:26 -07:00
|
|
|
s.registers = f.Config.registers
|
2016-06-15 10:07:16 -07:00
|
|
|
if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
|
|
|
|
|
s.f.Fatalf("bad number of registers: %d", nr)
|
|
|
|
|
} else {
|
|
|
|
|
s.numRegs = register(nr)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2016-06-15 10:07:16 -07:00
|
|
|
// Locate SP, SB, and g registers.
|
|
|
|
|
s.SPReg = noRegister
|
|
|
|
|
s.SBReg = noRegister
|
|
|
|
|
s.GReg = noRegister
|
2024-11-24 15:29:56 -08:00
|
|
|
s.ZeroIntReg = noRegister
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2017-08-17 12:23:34 -07:00
|
|
|
switch s.registers[r].String() {
|
2016-06-15 10:07:16 -07:00
|
|
|
case "SP":
|
2016-03-21 22:57:26 -07:00
|
|
|
s.SPReg = r
|
2016-06-15 10:07:16 -07:00
|
|
|
case "SB":
|
2016-03-21 22:57:26 -07:00
|
|
|
s.SBReg = r
|
2016-06-15 10:07:16 -07:00
|
|
|
case "g":
|
2016-05-31 14:01:34 -04:00
|
|
|
s.GReg = r
|
2024-11-24 15:29:56 -08:00
|
|
|
case "ZERO": // TODO: arch-specific?
|
|
|
|
|
s.ZeroIntReg = r
|
2016-05-31 14:01:34 -04:00
|
|
|
}
|
2016-03-21 22:57:26 -07:00
|
|
|
}
|
2016-06-15 10:07:16 -07:00
|
|
|
// Make sure we found all required registers.
|
|
|
|
|
switch noRegister {
|
|
|
|
|
case s.SPReg:
|
|
|
|
|
s.f.Fatalf("no SP register found")
|
|
|
|
|
case s.SBReg:
|
|
|
|
|
s.f.Fatalf("no SB register found")
|
|
|
|
|
case s.GReg:
|
|
|
|
|
if f.Config.hasGReg {
|
|
|
|
|
s.f.Fatalf("no g register found")
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Figure out which registers we're allowed to use.
|
2016-08-22 12:25:23 -04:00
|
|
|
s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
|
2016-04-15 12:49:30 -07:00
|
|
|
s.allocatable &^= 1 << s.SPReg
|
|
|
|
|
s.allocatable &^= 1 << s.SBReg
|
2016-05-31 14:01:34 -04:00
|
|
|
if s.f.Config.hasGReg {
|
|
|
|
|
s.allocatable &^= 1 << s.GReg
|
|
|
|
|
}
|
2024-11-24 15:29:56 -08:00
|
|
|
if s.ZeroIntReg != noRegister {
|
|
|
|
|
s.allocatable &^= 1 << s.ZeroIntReg
|
|
|
|
|
}
|
2021-04-15 23:05:49 -04:00
|
|
|
if buildcfg.FramePointerEnabled && s.f.Config.FPReg >= 0 {
|
2016-05-19 12:33:30 -04:00
|
|
|
s.allocatable &^= 1 << uint(s.f.Config.FPReg)
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
2016-10-06 15:06:45 -04:00
|
|
|
if s.f.Config.LinkReg != -1 {
|
|
|
|
|
if isLeaf(f) {
|
|
|
|
|
// Leaf functions don't save/restore the link register.
|
|
|
|
|
s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-06-03 18:03:29 -04:00
|
|
|
if s.f.Config.ctxt.Flag_dynlink {
|
|
|
|
|
switch s.f.Config.arch {
|
[dev.ssa] cmd/compile: fix PIC for SSA-generated code
Access to globals requires a 2-instruction sequence on PIC 386.
MOVL foo(SB), AX
is translated by the obj package into:
CALL getPCofNextInstructionInTempRegister(SB)
MOVL (&foo-&thisInstruction)(tmpReg), AX
The call returns the PC of the next instruction in a register.
The next instruction then offsets from that register to get the
address required. The tricky part is the allocation of the
temp register. The legacy compiler always used CX, and forbid
the register allocator from allocating CX when in PIC mode.
We can't easily do that in SSA because CX is actually a required
register for shift instructions. (I think the old backend got away
with this because the register allocator never uses CX, only
codegen knows that shifts must use CX.)
Instead, we allow the temp register to be anything. When the
destination of the MOV (or LEA) is an integer register, we can
use that register. Otherwise, we make sure to compile the
operation using an LEA to reference the global. So
MOVL AX, foo(SB)
is never generated directly. Instead, SSA generates:
LEAL foo(SB), DX
MOVL AX, (DX)
which is then rewritten by the obj package to:
CALL getPcInDX(SB)
LEAL (&foo-&thisInstruction)(DX), AX
MOVL AX, (DX)
So this CL modifies the obj package to use different thunks
to materialize the pc into different registers. We use the
registers that regalloc chose so that SSA can still allocate
the full set of registers.
Change-Id: Ie095644f7164a026c62e95baf9d18a8bcaed0bba
Reviewed-on: https://go-review.googlesource.com/25442
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2016-08-03 13:00:49 -07:00
|
|
|
case "386":
|
|
|
|
|
// nothing to do.
|
|
|
|
|
// Note that for Flag_shared (position independent code)
|
|
|
|
|
// we do need to be careful, but that carefulness is hidden
|
|
|
|
|
// in the rewrite rules so we always have a free register
|
2022-10-19 21:24:52 -07:00
|
|
|
// available for global load/stores. See _gen/386.rules (search for Flag_shared).
|
2021-07-16 02:36:52 +10:00
|
|
|
case "amd64":
|
|
|
|
|
s.allocatable &^= 1 << 15 // R15
|
|
|
|
|
case "arm":
|
|
|
|
|
s.allocatable &^= 1 << 9 // R9
|
|
|
|
|
case "arm64":
|
|
|
|
|
// nothing to do
|
2022-12-13 16:51:02 +08:00
|
|
|
case "loong64": // R2 (aka TP) already reserved.
|
|
|
|
|
// nothing to do
|
2021-07-16 02:36:52 +10:00
|
|
|
case "ppc64le": // R2 already reserved.
|
|
|
|
|
// nothing to do
|
2021-07-16 03:06:18 +10:00
|
|
|
case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
|
|
|
|
|
// nothing to do
|
2016-09-12 14:50:10 -04:00
|
|
|
case "s390x":
|
2018-04-30 16:55:13 +01:00
|
|
|
s.allocatable &^= 1 << 11 // R11
|
2016-06-03 18:03:29 -04:00
|
|
|
default:
|
2017-03-16 22:42:10 -07:00
|
|
|
s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
|
2016-06-03 18:03:29 -04:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
|
2017-06-30 16:20:10 -04:00
|
|
|
// Linear scan register allocation can be influenced by the order in which blocks appear.
|
|
|
|
|
// Decouple the register allocation order from the generated block order.
|
|
|
|
|
// This also creates an opportunity for experiments to find a better order.
|
|
|
|
|
s.visitOrder = layoutRegallocOrder(f)
|
|
|
|
|
|
|
|
|
|
// Compute block order. This array allows us to distinguish forward edges
|
|
|
|
|
// from backward edges and compute how far they go.
|
cmd/compile: use depth first topological sort algorithm for layout
The current layout algorithm tries to put consecutive blocks together,
so the priority of the successor block is higher than the priority of
the zero indegree block. This algorithm is beneficial for subsequent
register allocation, but will result in more branch instructions.
The depth-first topological sorting algorithm is a well-known layout
algorithm, which has applications in many languages, and it helps to
reduce branch instructions. This CL applies it to the layout pass.
The test results show that it helps to reduce the code size.
This CL also includes the following changes:
1, Removed the primary predecessor mechanism. The new layout algorithm is
not very friendly to register allocator in some cases, in order to adapt
to the new layout algorithm, a new primary predecessor selection strategy
is introduced.
2, Since the new layout implementation may place non-loop blocks between
loop blocks, some adaptive modifications have also been made to looprotate
pass.
3, The layout also affects the results of codegen, so this CL also adjusted
several codegen tests accordingly.
It is inevitable that this CL will cause the code size or performance of a
few functions to decrease, but the number of cases it improves is much larger
than the number of cases it drops.
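As a stand-alone illustration (toy types, not the compiler's real Block or layout pass), one way to produce a depth-first topological order of an acyclic CFG is a reverse-postorder DFS, sketched below in Go:
package main

import "fmt"

// toyBlock stands in for a basic block; only successor edges matter here.
type toyBlock struct {
	name  string
	succs []*toyBlock
}

// dfsTopoOrder returns blocks in reverse postorder, which is one valid
// depth-first topological order for an acyclic CFG. (Loops need extra care
// and are ignored beyond the seen check in this sketch.)
func dfsTopoOrder(entry *toyBlock) []*toyBlock {
	var post []*toyBlock
	seen := map[*toyBlock]bool{}
	var visit func(b *toyBlock)
	visit = func(b *toyBlock) {
		if seen[b] {
			return
		}
		seen[b] = true
		for _, s := range b.succs {
			visit(s)
		}
		post = append(post, b)
	}
	visit(entry)
	// Reverse the postorder so predecessors come before successors.
	for i, j := 0, len(post)-1; i < j; i, j = i+1, j-1 {
		post[i], post[j] = post[j], post[i]
	}
	return post
}

func main() {
	b4 := &toyBlock{name: "b4"}
	b2 := &toyBlock{name: "b2", succs: []*toyBlock{b4}}
	b3 := &toyBlock{name: "b3", succs: []*toyBlock{b4}}
	b1 := &toyBlock{name: "b1", succs: []*toyBlock{b2, b3}}
	for _, b := range dfsTopoOrder(b1) {
		fmt.Print(b.name, " ") // b1 b3 b2 b4 (one valid order)
	}
	fmt.Println()
}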
Statistical data from compilecmp on linux/amd64 is as follows:
name old time/op new time/op delta
Template 382ms ± 4% 382ms ± 4% ~ (p=0.497 n=49+50)
Unicode 170ms ± 9% 169ms ± 8% ~ (p=0.344 n=48+50)
GoTypes 2.01s ± 4% 2.01s ± 4% ~ (p=0.628 n=50+48)
Compiler 190ms ±10% 189ms ± 9% ~ (p=0.734 n=50+50)
SSA 11.8s ± 2% 11.8s ± 3% ~ (p=0.877 n=50+50)
Flate 241ms ± 9% 241ms ± 8% ~ (p=0.897 n=50+49)
GoParser 366ms ± 3% 361ms ± 4% -1.21% (p=0.004 n=47+50)
Reflect 835ms ± 3% 838ms ± 3% ~ (p=0.275 n=50+49)
Tar 336ms ± 4% 335ms ± 3% ~ (p=0.454 n=48+48)
XML 433ms ± 4% 431ms ± 3% ~ (p=0.071 n=49+48)
LinkCompiler 706ms ± 4% 705ms ± 4% ~ (p=0.608 n=50+49)
ExternalLinkCompiler 1.85s ± 3% 1.83s ± 2% -1.47% (p=0.000 n=49+48)
LinkWithoutDebugCompiler 437ms ± 5% 437ms ± 6% ~ (p=0.953 n=49+50)
[Geo mean] 615ms 613ms -0.37%
name old alloc/op new alloc/op delta
Template 38.7MB ± 1% 38.7MB ± 1% ~ (p=0.834 n=50+50)
Unicode 28.1MB ± 0% 28.1MB ± 0% -0.22% (p=0.000 n=49+50)
GoTypes 168MB ± 1% 168MB ± 1% ~ (p=0.054 n=47+47)
Compiler 23.0MB ± 1% 23.0MB ± 1% ~ (p=0.432 n=50+50)
SSA 1.54GB ± 0% 1.54GB ± 0% +0.21% (p=0.000 n=50+50)
Flate 23.6MB ± 1% 23.6MB ± 1% ~ (p=0.153 n=43+46)
GoParser 35.1MB ± 1% 35.1MB ± 2% ~ (p=0.202 n=50+50)
Reflect 84.7MB ± 1% 84.7MB ± 1% ~ (p=0.333 n=48+49)
Tar 34.5MB ± 1% 34.5MB ± 1% ~ (p=0.406 n=46+49)
XML 44.3MB ± 2% 44.2MB ± 3% ~ (p=0.981 n=50+50)
LinkCompiler 131MB ± 0% 128MB ± 0% -2.74% (p=0.000 n=50+50)
ExternalLinkCompiler 120MB ± 0% 120MB ± 0% +0.01% (p=0.007 n=50+50)
LinkWithoutDebugCompiler 77.3MB ± 0% 77.3MB ± 0% -0.02% (p=0.000 n=50+50)
[Geo mean] 69.3MB 69.1MB -0.22%
file before after Δ %
addr2line 4104220 4043684 -60536 -1.475%
api 5342502 5249678 -92824 -1.737%
asm 4973785 4858257 -115528 -2.323%
buildid 2667844 2625660 -42184 -1.581%
cgo 4686849 4616313 -70536 -1.505%
compile 23667431 23268406 -399025 -1.686%
cover 4959676 4874108 -85568 -1.725%
dist 3515934 3450422 -65512 -1.863%
doc 3995581 3925469 -70112 -1.755%
fix 3379202 3318522 -60680 -1.796%
link 6743249 6629913 -113336 -1.681%
nm 4047529 3991777 -55752 -1.377%
objdump 4456151 4388151 -68000 -1.526%
pack 2435040 2398072 -36968 -1.518%
pprof 13804080 13565808 -238272 -1.726%
test2json 2690043 2645987 -44056 -1.638%
trace 10418492 10232716 -185776 -1.783%
vet 7258259 7121259 -137000 -1.888%
total 113145867 111204202 -1941665 -1.716%
The situation on linux/arm64 is as follows:
name old time/op new time/op delta
Template 280ms ± 1% 282ms ± 1% +0.75% (p=0.000 n=46+48)
Unicode 124ms ± 2% 124ms ± 2% +0.37% (p=0.045 n=50+50)
GoTypes 1.69s ± 1% 1.70s ± 1% +0.56% (p=0.000 n=49+50)
Compiler 122ms ± 1% 123ms ± 1% +0.93% (p=0.000 n=50+50)
SSA 12.6s ± 1% 12.7s ± 0% +0.72% (p=0.000 n=50+50)
Flate 170ms ± 1% 172ms ± 1% +0.97% (p=0.000 n=49+49)
GoParser 262ms ± 1% 263ms ± 1% +0.39% (p=0.000 n=49+48)
Reflect 639ms ± 1% 650ms ± 1% +1.63% (p=0.000 n=49+49)
Tar 243ms ± 1% 245ms ± 1% +0.82% (p=0.000 n=50+50)
XML 324ms ± 1% 327ms ± 1% +0.72% (p=0.000 n=50+49)
LinkCompiler 597ms ± 1% 596ms ± 1% -0.27% (p=0.001 n=48+47)
ExternalLinkCompiler 1.90s ± 1% 1.88s ± 1% -1.00% (p=0.000 n=50+50)
LinkWithoutDebugCompiler 364ms ± 1% 363ms ± 1% ~ (p=0.220 n=49+50)
[Geo mean] 485ms 488ms +0.49%
name old alloc/op new alloc/op delta
Template 38.7MB ± 0% 38.8MB ± 1% ~ (p=0.093 n=43+49)
Unicode 28.4MB ± 0% 28.4MB ± 0% +0.03% (p=0.000 n=49+45)
GoTypes 169MB ± 1% 169MB ± 1% +0.23% (p=0.010 n=50+50)
Compiler 23.2MB ± 1% 23.2MB ± 1% +0.11% (p=0.000 n=40+44)
SSA 1.54GB ± 0% 1.55GB ± 0% +0.45% (p=0.000 n=47+49)
Flate 23.8MB ± 2% 23.8MB ± 1% ~ (p=0.543 n=50+50)
GoParser 35.3MB ± 1% 35.4MB ± 1% ~ (p=0.792 n=50+50)
Reflect 85.2MB ± 1% 85.2MB ± 0% ~ (p=0.055 n=50+47)
Tar 34.5MB ± 1% 34.5MB ± 1% +0.06% (p=0.015 n=50+50)
XML 43.8MB ± 2% 43.9MB ± 2% +0.19% (p=0.000 n=48+48)
LinkCompiler 137MB ± 0% 136MB ± 0% -0.92% (p=0.000 n=50+50)
ExternalLinkCompiler 127MB ± 0% 127MB ± 0% ~ (p=0.516 n=50+50)
LinkWithoutDebugCompiler 84.0MB ± 0% 84.0MB ± 0% ~ (p=0.057 n=50+50)
[Geo mean] 70.4MB 70.4MB +0.01%
file before after Δ %
addr2line 4021557 4002933 -18624 -0.463%
api 5127847 5028503 -99344 -1.937%
asm 5034716 4936836 -97880 -1.944%
buildid 2608118 2594094 -14024 -0.538%
cgo 4488592 4398320 -90272 -2.011%
compile 22501129 22213592 -287537 -1.278%
cover 4742301 4713573 -28728 -0.606%
dist 3388071 3365311 -22760 -0.672%
doc 3802250 3776082 -26168 -0.688%
fix 3306147 3216939 -89208 -2.698%
link 6404483 6363699 -40784 -0.637%
nm 3941026 3921930 -19096 -0.485%
objdump 4383330 4295122 -88208 -2.012%
pack 2404547 2389515 -15032 -0.625%
pprof 12996234 12856818 -139416 -1.073%
test2json 2668500 2586788 -81712 -3.062%
trace 9816276 9609580 -206696 -2.106%
vet 6900682 6787338 -113344 -1.643%
total 108535806 107056973 -1478833 -1.363%
Change-Id: Iaec1cdcaacca8025e9babb0fb8a532fddb70c87d
Reviewed-on: https://go-review.googlesource.com/c/go/+/255239
Reviewed-by: eric fang <eric.fang@arm.com>
Reviewed-by: Keith Randall <khr@golang.org>
Trust: eric fang <eric.fang@arm.com>
2020-07-23 10:24:56 +08:00
|
|
|
s.blockOrder = make([]int32, f.NumBlocks())
|
2017-06-30 16:20:10 -04:00
|
|
|
for i, b := range s.visitOrder {
|
cmd/compile: use depth first topological sort algorithm for layout
The current layout algorithm tries to put consecutive blocks together,
so the priority of the successor block is higher than the priority of
the zero indegree block. This algorithm is beneficial for subsequent
register allocation, but will result in more branch instructions.
The depth-first topological sorting algorithm is a well-known layout
algorithm, which has applications in many languages, and it helps to
reduce branch instructions. This CL applies it to the layout pass.
The test results show that it helps to reduce the code size.
This CL also includes the following changes:
1, Removed the primary predecessor mechanism. The new layout algorithm is
not very friendly to the register allocator in some cases, so a new
primary predecessor selection strategy is introduced to adapt to it.
2, Since the new layout implementation may place non-loop blocks between
loop blocks, some adaptive modifications have also been made to the
looprotate pass.
3, The layout also affects the results of codegen, so this CL also adjusted
several codegen tests accordingly.
It is inevitable that this CL will cause the code size or performance of a
few functions to regress, but the number of cases it improves is much larger
than the number of cases it regresses.
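As a rough standalone illustration of the ordering idea (not the compiler's actual layout pass), the sketch below lays out a toy CFG depth first, so that a block tends to be followed immediately by one of its successors and the fall-through needs no branch. Block, ID, and Succs here are hypothetical stand-ins, not the real *ssa.Block.

package main

import "fmt"

// Block is a minimal stand-in for a basic block with successor edges.
type Block struct {
	ID    int
	Succs []*Block
}

// dfsLayout emits blocks in depth-first order from the entry block: each
// block is placed when first reached and its successors are visited
// immediately, so most blocks fall through to a successor without an
// explicit jump.
func dfsLayout(entry *Block) []*Block {
	seen := map[int]bool{}
	var order []*Block
	var visit func(*Block)
	visit = func(b *Block) {
		if seen[b.ID] {
			return
		}
		seen[b.ID] = true
		order = append(order, b)
		for _, s := range b.Succs {
			visit(s)
		}
	}
	visit(entry)
	return order
}

func main() {
	exit := &Block{ID: 3}
	then := &Block{ID: 1, Succs: []*Block{exit}}
	els := &Block{ID: 2, Succs: []*Block{exit}}
	entry := &Block{ID: 0, Succs: []*Block{then, els}}
	for _, b := range dfsLayout(entry) {
		fmt.Printf("b%d ", b.ID) // b0 b1 b3 b2
	}
	fmt.Println()
}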
Statistical data from compilecmp on linux/amd64 is as follows:
name old time/op new time/op delta
Template 382ms ± 4% 382ms ± 4% ~ (p=0.497 n=49+50)
Unicode 170ms ± 9% 169ms ± 8% ~ (p=0.344 n=48+50)
GoTypes 2.01s ± 4% 2.01s ± 4% ~ (p=0.628 n=50+48)
Compiler 190ms ±10% 189ms ± 9% ~ (p=0.734 n=50+50)
SSA 11.8s ± 2% 11.8s ± 3% ~ (p=0.877 n=50+50)
Flate 241ms ± 9% 241ms ± 8% ~ (p=0.897 n=50+49)
GoParser 366ms ± 3% 361ms ± 4% -1.21% (p=0.004 n=47+50)
Reflect 835ms ± 3% 838ms ± 3% ~ (p=0.275 n=50+49)
Tar 336ms ± 4% 335ms ± 3% ~ (p=0.454 n=48+48)
XML 433ms ± 4% 431ms ± 3% ~ (p=0.071 n=49+48)
LinkCompiler 706ms ± 4% 705ms ± 4% ~ (p=0.608 n=50+49)
ExternalLinkCompiler 1.85s ± 3% 1.83s ± 2% -1.47% (p=0.000 n=49+48)
LinkWithoutDebugCompiler 437ms ± 5% 437ms ± 6% ~ (p=0.953 n=49+50)
[Geo mean] 615ms 613ms -0.37%
name old alloc/op new alloc/op delta
Template 38.7MB ± 1% 38.7MB ± 1% ~ (p=0.834 n=50+50)
Unicode 28.1MB ± 0% 28.1MB ± 0% -0.22% (p=0.000 n=49+50)
GoTypes 168MB ± 1% 168MB ± 1% ~ (p=0.054 n=47+47)
Compiler 23.0MB ± 1% 23.0MB ± 1% ~ (p=0.432 n=50+50)
SSA 1.54GB ± 0% 1.54GB ± 0% +0.21% (p=0.000 n=50+50)
Flate 23.6MB ± 1% 23.6MB ± 1% ~ (p=0.153 n=43+46)
GoParser 35.1MB ± 1% 35.1MB ± 2% ~ (p=0.202 n=50+50)
Reflect 84.7MB ± 1% 84.7MB ± 1% ~ (p=0.333 n=48+49)
Tar 34.5MB ± 1% 34.5MB ± 1% ~ (p=0.406 n=46+49)
XML 44.3MB ± 2% 44.2MB ± 3% ~ (p=0.981 n=50+50)
LinkCompiler 131MB ± 0% 128MB ± 0% -2.74% (p=0.000 n=50+50)
ExternalLinkCompiler 120MB ± 0% 120MB ± 0% +0.01% (p=0.007 n=50+50)
LinkWithoutDebugCompiler 77.3MB ± 0% 77.3MB ± 0% -0.02% (p=0.000 n=50+50)
[Geo mean] 69.3MB 69.1MB -0.22%
file before after Δ %
addr2line 4104220 4043684 -60536 -1.475%
api 5342502 5249678 -92824 -1.737%
asm 4973785 4858257 -115528 -2.323%
buildid 2667844 2625660 -42184 -1.581%
cgo 4686849 4616313 -70536 -1.505%
compile 23667431 23268406 -399025 -1.686%
cover 4959676 4874108 -85568 -1.725%
dist 3515934 3450422 -65512 -1.863%
doc 3995581 3925469 -70112 -1.755%
fix 3379202 3318522 -60680 -1.796%
link 6743249 6629913 -113336 -1.681%
nm 4047529 3991777 -55752 -1.377%
objdump 4456151 4388151 -68000 -1.526%
pack 2435040 2398072 -36968 -1.518%
pprof 13804080 13565808 -238272 -1.726%
test2json 2690043 2645987 -44056 -1.638%
trace 10418492 10232716 -185776 -1.783%
vet 7258259 7121259 -137000 -1.888%
total 113145867 111204202 -1941665 -1.716%
The situation on linux/arm64 is as follows:
name old time/op new time/op delta
Template 280ms ± 1% 282ms ± 1% +0.75% (p=0.000 n=46+48)
Unicode 124ms ± 2% 124ms ± 2% +0.37% (p=0.045 n=50+50)
GoTypes 1.69s ± 1% 1.70s ± 1% +0.56% (p=0.000 n=49+50)
Compiler 122ms ± 1% 123ms ± 1% +0.93% (p=0.000 n=50+50)
SSA 12.6s ± 1% 12.7s ± 0% +0.72% (p=0.000 n=50+50)
Flate 170ms ± 1% 172ms ± 1% +0.97% (p=0.000 n=49+49)
GoParser 262ms ± 1% 263ms ± 1% +0.39% (p=0.000 n=49+48)
Reflect 639ms ± 1% 650ms ± 1% +1.63% (p=0.000 n=49+49)
Tar 243ms ± 1% 245ms ± 1% +0.82% (p=0.000 n=50+50)
XML 324ms ± 1% 327ms ± 1% +0.72% (p=0.000 n=50+49)
LinkCompiler 597ms ± 1% 596ms ± 1% -0.27% (p=0.001 n=48+47)
ExternalLinkCompiler 1.90s ± 1% 1.88s ± 1% -1.00% (p=0.000 n=50+50)
LinkWithoutDebugCompiler 364ms ± 1% 363ms ± 1% ~ (p=0.220 n=49+50)
[Geo mean] 485ms 488ms +0.49%
name old alloc/op new alloc/op delta
Template 38.7MB ± 0% 38.8MB ± 1% ~ (p=0.093 n=43+49)
Unicode 28.4MB ± 0% 28.4MB ± 0% +0.03% (p=0.000 n=49+45)
GoTypes 169MB ± 1% 169MB ± 1% +0.23% (p=0.010 n=50+50)
Compiler 23.2MB ± 1% 23.2MB ± 1% +0.11% (p=0.000 n=40+44)
SSA 1.54GB ± 0% 1.55GB ± 0% +0.45% (p=0.000 n=47+49)
Flate 23.8MB ± 2% 23.8MB ± 1% ~ (p=0.543 n=50+50)
GoParser 35.3MB ± 1% 35.4MB ± 1% ~ (p=0.792 n=50+50)
Reflect 85.2MB ± 1% 85.2MB ± 0% ~ (p=0.055 n=50+47)
Tar 34.5MB ± 1% 34.5MB ± 1% +0.06% (p=0.015 n=50+50)
XML 43.8MB ± 2% 43.9MB ± 2% +0.19% (p=0.000 n=48+48)
LinkCompiler 137MB ± 0% 136MB ± 0% -0.92% (p=0.000 n=50+50)
ExternalLinkCompiler 127MB ± 0% 127MB ± 0% ~ (p=0.516 n=50+50)
LinkWithoutDebugCompiler 84.0MB ± 0% 84.0MB ± 0% ~ (p=0.057 n=50+50)
[Geo mean] 70.4MB 70.4MB +0.01%
file before after Δ %
addr2line 4021557 4002933 -18624 -0.463%
api 5127847 5028503 -99344 -1.937%
asm 5034716 4936836 -97880 -1.944%
buildid 2608118 2594094 -14024 -0.538%
cgo 4488592 4398320 -90272 -2.011%
compile 22501129 22213592 -287537 -1.278%
cover 4742301 4713573 -28728 -0.606%
dist 3388071 3365311 -22760 -0.672%
doc 3802250 3776082 -26168 -0.688%
fix 3306147 3216939 -89208 -2.698%
link 6404483 6363699 -40784 -0.637%
nm 3941026 3921930 -19096 -0.485%
objdump 4383330 4295122 -88208 -2.012%
pack 2404547 2389515 -15032 -0.625%
pprof 12996234 12856818 -139416 -1.073%
test2json 2668500 2586788 -81712 -3.062%
trace 9816276 9609580 -206696 -2.106%
vet 6900682 6787338 -113344 -1.643%
total 108535806 107056973 -1478833 -1.363%
Change-Id: Iaec1cdcaacca8025e9babb0fb8a532fddb70c87d
Reviewed-on: https://go-review.googlesource.com/c/go/+/255239
Reviewed-by: eric fang <eric.fang@arm.com>
Reviewed-by: Keith Randall <khr@golang.org>
Trust: eric fang <eric.fang@arm.com>
2020-07-23 10:24:56 +08:00
|
|
|
s.blockOrder[b.ID] = int32(i)
|
2017-06-30 16:20:10 -04:00
|
|
|
}
|
|
|
|
|
|
2016-03-21 22:57:26 -07:00
|
|
|
s.regs = make([]regState, s.numRegs)
|
2019-05-09 11:31:04 -07:00
|
|
|
nv := f.NumValues()
|
|
|
|
|
if cap(s.f.Cache.regallocValues) >= nv {
|
|
|
|
|
s.f.Cache.regallocValues = s.f.Cache.regallocValues[:nv]
|
|
|
|
|
} else {
|
|
|
|
|
s.f.Cache.regallocValues = make([]valState, nv)
|
|
|
|
|
}
|
|
|
|
|
s.values = s.f.Cache.regallocValues
|
2022-10-18 16:07:36 -07:00
|
|
|
s.orig = s.f.Cache.allocValueSlice(nv)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.copies = make(map[*Value]bool)
|
2017-06-30 16:20:10 -04:00
|
|
|
for _, b := range s.visitOrder {
|
2015-10-29 13:41:02 -07:00
|
|
|
for _, v := range b.Values {
|
2023-05-05 01:07:25 +00:00
|
|
|
if v.needRegister() {
|
2015-12-17 10:01:24 -08:00
|
|
|
s.values[v.ID].needReg = true
|
|
|
|
|
s.values[v.ID].rematerializeable = v.rematerializeable()
|
|
|
|
|
s.orig[v.ID] = v
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
// Note: needReg is false for values returning Tuple types.
|
|
|
|
|
// Instead, we mark the corresponding Selects as needReg.
|
2015-10-29 13:41:02 -07:00
|
|
|
}
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
s.computeLive()
|
2015-05-18 16:44:20 -07:00
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
s.endRegs = make([][]endReg, f.NumBlocks())
|
|
|
|
|
s.startRegs = make([][]startReg, f.NumBlocks())
|
|
|
|
|
s.spillLive = make([][]ID, f.NumBlocks())
|
2019-11-01 14:04:08 -07:00
|
|
|
s.sdom = f.Sdom()
|
cmd/compile: add wasm stack optimization
Go's SSA instructions only operate on registers. For example, an add
instruction would read two registers, do the addition and then write
to a register. WebAssembly's instructions, on the other hand, operate
on the stack. The add instruction first pops two values from the stack,
does the addition, then pushes the result to the stack. To fulfill
Go's semantics, one needs to map Go's single add instruction to
4 WebAssembly instructions:
- Push the value of local variable A to the stack
- Push the value of local variable B to the stack
- Do addition
- Write value from stack to local variable C
Now consider that B was set to the constant 42 before the addition:
- Push constant 42 to the stack
- Write value from stack to local variable B
This works, but is inefficient. Instead, the stack is used directly
by inlining instructions if possible. With inlining it becomes:
- Push the value of local variable A to the stack (add)
- Push constant 42 to the stack (constant)
- Do addition (add)
- Write value from stack to local variable C (add)
Note that the two SSA instructions can not be generated sequentially
anymore, because their WebAssembly instructions are interleaved.
Design doc: https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4
Updates #18892
Change-Id: Ie35e1c0bebf4985fddda0d6330eb2066f9ad6dec
Reviewed-on: https://go-review.googlesource.com/103535
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
2018-03-09 00:14:58 +01:00
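A standalone toy of the marking idea described in the message above. It is simplified: the real pass below additionally requires the argument to be non-memory, in the same block, and already a machine-specific (wasm) op. The val type and markOnStack are hypothetical stand-ins, not the compiler's real *ssa.Value.

package main

import "fmt"

// val is a tiny stand-in for an SSA value: name, use count, arguments, and
// whether its result may stay on the WebAssembly operand stack.
type val struct {
	name    string
	uses    int
	args    []*val
	onStack bool
}

// markOnStack walks a block backwards. A value already marked as a candidate
// keeps its result on the stack; otherwise it must be written to a local,
// and since values cannot be reordered the pending candidates are discarded.
// Each single-use argument of the current value becomes a new candidate.
func markOnStack(block []*val) {
	candidate := map[*val]bool{}
	for i := len(block) - 1; i >= 0; i-- {
		v := block[i]
		if candidate[v] {
			v.onStack = true
		} else {
			candidate = map[*val]bool{}
		}
		for _, a := range v.args {
			if a.uses == 1 {
				candidate[a] = true
			}
		}
	}
}

func main() {
	a := &val{name: "a", uses: 1}
	c42 := &val{name: "const42", uses: 1}
	add := &val{name: "add", uses: 1, args: []*val{a, c42}}
	markOnStack([]*val{a, c42, add})
	for _, v := range []*val{a, c42, add} {
		fmt.Println(v.name, "on stack:", v.onStack)
	}
	// a and const42 are pushed directly for the add; the add result itself
	// still goes to a local because nothing marked it as a candidate.
}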
|
|
|
|
|
|
|
|
// wasm: Mark instructions that can be optimized to have their values only on the WebAssembly stack.
|
|
|
|
|
if f.Config.ctxt.Arch.Arch == sys.ArchWasm {
|
|
|
|
|
canLiveOnStack := f.newSparseSet(f.NumValues())
|
|
|
|
|
defer f.retSparseSet(canLiveOnStack)
|
|
|
|
|
for _, b := range f.Blocks {
|
|
|
|
|
// New block. Clear candidate set.
|
|
|
|
|
canLiveOnStack.clear()
|
2019-08-12 20:19:58 +01:00
|
|
|
for _, c := range b.ControlValues() {
|
|
|
|
|
if c.Uses == 1 && !opcodeTable[c.Op].generic {
|
|
|
|
|
canLiveOnStack.add(c.ID)
|
|
|
|
|
}
|
2018-03-09 00:14:58 +01:00
|
|
|
}
|
|
|
|
|
// Walking backwards.
|
|
|
|
|
for i := len(b.Values) - 1; i >= 0; i-- {
|
|
|
|
|
v := b.Values[i]
|
|
|
|
|
if canLiveOnStack.contains(v.ID) {
|
|
|
|
|
v.OnWasmStack = true
|
|
|
|
|
} else {
|
|
|
|
|
// Value can not live on stack. Values are not allowed to be reordered, so clear candidate set.
|
|
|
|
|
canLiveOnStack.clear()
|
|
|
|
|
}
|
|
|
|
|
for _, arg := range v.Args {
|
|
|
|
|
// Value can live on the stack if:
|
|
|
|
|
// - it is only used once
|
|
|
|
|
// - it is used in the same basic block
|
|
|
|
|
// - it is not a "mem" value
|
|
|
|
|
// - it is a WebAssembly op
|
|
|
|
|
if arg.Uses == 1 && arg.Block == v.Block && !arg.Type.IsMemory() && !opcodeTable[arg.Op].generic {
|
|
|
|
|
canLiveOnStack.add(arg.ID)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-17 19:15:38 -04:00
|
|
|
|
|
|
|
|
// The clobberdeadreg experiment inserts code to clobber dead registers
|
|
|
|
|
// at call sites.
|
|
|
|
|
// Ignore huge functions to avoid doing too much work.
|
|
|
|
|
if base.Flag.ClobberDeadReg && len(s.f.Blocks) <= 10000 {
|
|
|
|
|
// TODO: honor GOCLOBBERDEADHASH, or maybe GOSSAHASH.
|
|
|
|
|
s.doClobber = true
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2022-10-18 16:07:36 -07:00
|
|
|
func (s *regAllocState) close() {
|
|
|
|
|
s.f.Cache.freeValueSlice(s.orig)
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
// Adds a use record for id at distance dist from the start of the block.
|
|
|
|
|
// All calls to addUse must happen with nonincreasing dist.
|
2016-12-15 17:17:01 -08:00
|
|
|
func (s *regAllocState) addUse(id ID, dist int32, pos src.XPos) {
|
2015-11-05 14:59:47 -08:00
|
|
|
r := s.freeUseRecords
|
|
|
|
|
if r != nil {
|
|
|
|
|
s.freeUseRecords = r.next
|
|
|
|
|
} else {
|
|
|
|
|
r = &use{}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
r.dist = dist
|
2016-12-08 13:49:51 -08:00
|
|
|
r.pos = pos
|
2015-11-05 14:59:47 -08:00
|
|
|
r.next = s.values[id].uses
|
|
|
|
|
s.values[id].uses = r
|
|
|
|
|
if r.next != nil && dist > r.next.dist {
|
|
|
|
|
s.f.Fatalf("uses added in wrong order")
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
}
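A standalone illustration of the ordering this invariant produces: because records are pushed onto the front of the list with nonincreasing dist (the block is walked backwards), reading the list front to back yields nondecreasing distances, i.e. the nearest use first. The use type here is a minimal stand-in for the allocator's record.

package main

import "fmt"

// use is a minimal stand-in for a use record: distance from block start plus
// a link to the next record.
type use struct {
	dist int32
	next *use
}

func main() {
	var head *use
	// Distances arrive in nonincreasing order because the block is walked
	// backwards; each record is pushed onto the front of the list.
	for _, d := range []int32{9, 4, 4, 1} {
		head = &use{dist: d, next: head}
	}
	for u := head; u != nil; u = u.next {
		fmt.Print(u.dist, " ") // 1 4 4 9: nearest use first
	}
	fmt.Println()
}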
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
// advanceUses advances the uses of v's args from the state before v to the state after v.
|
|
|
|
|
// Any values which have no more uses are deallocated from registers.
|
|
|
|
|
func (s *regAllocState) advanceUses(v *Value) {
|
|
|
|
|
for _, a := range v.Args {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !s.values[a.ID].needReg {
|
2015-08-11 12:51:33 -07:00
|
|
|
continue
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
ai := &s.values[a.ID]
|
|
|
|
|
r := ai.uses
|
|
|
|
|
ai.uses = r.next
|
2024-11-24 15:29:56 -08:00
|
|
|
if r.next == nil || (!opcodeTable[a.Op].fixedReg && r.next.dist > s.nextCall[s.curIdx]) {
|
2023-07-12 15:31:25 -07:00
|
|
|
// Value is dead (or is not used again until after a call), free all registers that hold it.
|
2015-11-05 14:59:47 -08:00
|
|
|
s.freeRegs(ai.regs)
|
|
|
|
|
}
|
|
|
|
|
r.next = s.freeUseRecords
|
|
|
|
|
s.freeUseRecords = r
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2023-07-12 15:31:25 -07:00
|
|
|
s.dropIfUnused(v)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Drop v from registers if it isn't used again, or its only uses are after
|
|
|
|
|
// a call instruction.
|
|
|
|
|
func (s *regAllocState) dropIfUnused(v *Value) {
|
|
|
|
|
if !s.values[v.ID].needReg {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
vi := &s.values[v.ID]
|
|
|
|
|
r := vi.uses
|
2024-11-24 15:29:56 -08:00
|
|
|
if r == nil || (!opcodeTable[v.Op].fixedReg && r.dist > s.nextCall[s.curIdx]) {
|
2023-07-12 15:31:25 -07:00
|
|
|
s.freeRegs(vi.regs)
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-04-10 08:26:43 -07:00
|
|
|
// liveAfterCurrentInstruction reports whether v is live after
|
|
|
|
|
// the current instruction is completed. v must be used by the
|
|
|
|
|
// current instruction.
|
|
|
|
|
func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
|
|
|
|
|
u := s.values[v.ID].uses
|
2021-02-04 16:42:35 -05:00
|
|
|
if u == nil {
|
|
|
|
|
panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID]))
|
|
|
|
|
}
|
2016-04-10 08:26:43 -07:00
|
|
|
d := u.dist
|
|
|
|
|
for u != nil && u.dist == d {
|
|
|
|
|
u = u.next
|
|
|
|
|
}
|
|
|
|
|
return u != nil && u.dist > d
|
|
|
|
|
}
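A standalone restatement of the scan above, using a minimal stand-in use list: all records at the current instruction's distance d belong to that instruction itself, so the value is live afterwards exactly when a record with a larger distance follows.

package main

import "fmt"

type use struct {
	dist int32
	next *use
}

// liveAfter mirrors the scan in liveAfterCurrentInstruction: skip every use
// at the current distance d, then report whether any later use remains.
func liveAfter(u *use) bool {
	d := u.dist
	for u != nil && u.dist == d {
		u = u.next
	}
	return u != nil && u.dist > d
}

func main() {
	// v is used twice by the current instruction (dist 5) and once later (dist 8).
	uses := &use{dist: 5, next: &use{dist: 5, next: &use{dist: 8}}}
	fmt.Println(liveAfter(uses)) // true

	// v's only uses are by the current instruction.
	fmt.Println(liveAfter(&use{dist: 5, next: &use{dist: 5}})) // false
}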
|
|
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
// Sets the state of the registers to that encoded in regs.
|
|
|
|
|
func (s *regAllocState) setState(regs []endReg) {
|
cmd/compile: reimplement location list generation
Completely redesign and reimplement location list generation to be more
efficient, and hopefully not too hard to understand.
RegKills are gone. Instead of using the regalloc's liveness
calculations, redo them using the Ops' clobber information. Besides
saving a lot of Values, this avoids adding RegKills to blocks that would
be empty otherwise, which was messing up optimizations. This does mean
that it's much harder to tell whether the generation process is buggy
(there's nothing to cross-check it with), and there may be disagreements
with GC liveness. But the performance gain is significant, and it's nice
not to be messing with earlier compiler phases.
The intermediate representations are gone. Instead of producing
ssa.BlockDebugs, then dwarf.LocationLists, and then finally real
location lists, go directly from the SSA to a (mostly) real location
list. Because the SSA analysis happens before assembly, it stores
encoded block/value IDs where PCs would normally go. It would be easier
to do the SSA analysis after assembly, but I didn't want to retain the
SSA just for that.
Generation proceeds in two phases: first, it traverses the function in
CFG order, storing the state of the block at the beginning and end. End
states are used to produce the start states of the successor blocks. In
the second phase, it traverses in program text order and produces the
location lists. The processing in the second phase is redundant, but
much cheaper than storing the intermediate representation. It might be
possible to combine the two phases somewhat to take advantage of cases
where the CFG matches the block layout, but I haven't tried.
Location lists are finalized by adding a base address selection entry,
translating each encoded block/value ID to a real PC, and adding the
terminating zero entry. This probably won't work on OSX, where dsymutil
will choke on the base address selection. I tried emitting CU-relative
relocations for each address, and it was *very* bad for performance --
it uses more memory storing all the relocations than it does for the
actual location list bytes. I think I'm going to end up synthesizing the
relocations in the linker only on OSX, but TBD.
TestNexting needs updating: with more optimizations working, the
debugger doesn't stop on the continue (line 88) any more, and the test's
duplicate suppression kicks in. Also, dx and dy live a little longer
now, but they have the correct values.
Change-Id: Ie772dfe23a4e389ca573624fac4d05401ae32307
Reviewed-on: https://go-review.googlesource.com/89356
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2017-10-26 15:40:17 -04:00
|
|
|
s.freeRegs(s.used)
|
2015-12-17 10:01:24 -08:00
|
|
|
for _, x := range regs {
|
|
|
|
|
s.assignReg(x.r, x.v, x.c)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
}
|
2015-05-18 16:44:20 -07:00
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
// compatRegs returns the set of registers which can store a type t.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func (s *regAllocState) compatRegs(t *types.Type) regMask {
|
2015-10-22 13:07:38 -07:00
|
|
|
var m regMask
|
2016-09-23 09:15:51 -04:00
|
|
|
if t.IsTuple() || t.IsFlags() {
|
|
|
|
|
return 0
|
|
|
|
|
}
|
2025-07-07 03:08:01 +00:00
|
|
|
if t.IsSIMD() {
|
|
|
|
|
if t.Size() > 8 {
|
|
|
|
|
return s.f.Config.fpRegMask & s.allocatable
|
|
|
|
|
} else {
|
|
|
|
|
// K mask
|
|
|
|
|
return s.f.Config.gpRegMask & s.allocatable
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-04-28 14:12:28 -07:00
|
|
|
if t.IsFloat() || t == types.TypeInt128 {
|
2020-12-01 03:25:29 -08:00
|
|
|
if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
|
2019-09-12 21:05:45 +02:00
|
|
|
m = s.f.Config.fp32RegMask
|
2020-12-01 03:25:29 -08:00
|
|
|
} else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
|
2019-09-12 21:05:45 +02:00
|
|
|
m = s.f.Config.fp64RegMask
|
|
|
|
|
} else {
|
|
|
|
|
m = s.f.Config.fpRegMask
|
|
|
|
|
}
|
2015-10-22 13:07:38 -07:00
|
|
|
} else {
|
2016-05-19 12:33:30 -04:00
|
|
|
m = s.f.Config.gpRegMask
|
2015-08-25 22:49:59 -05:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
return m & s.allocatable
|
2015-08-25 22:49:59 -05:00
|
|
|
}
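A standalone sketch of how a mask like the one compatRegs returns is typically consumed: intersect the type-compatible registers with the registers currently free and pick one set bit. The register numbering and the pick-lowest choice are illustrative only, not the allocator's real selection policy.

package main

import (
	"fmt"
	"math/bits"
)

type regMask uint64

func main() {
	const (
		AX regMask = 1 << iota
		CX
		DX
		BX
	)
	compat := AX | CX | DX | BX // registers the value's type can live in
	used := AX | DX             // registers currently holding other values

	free := compat &^ used
	if free == 0 {
		fmt.Println("no compatible register free: something must be spilled")
		return
	}
	r := bits.TrailingZeros64(uint64(free)) // pick the lowest free register
	fmt.Println("chosen register number:", r) // 1 (CX)
}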
|
|
|
|
|
|
cmd/compile: don't lower OpConvert
Currently, each architecture lowers OpConvert to an arch-specific
OpXXXconvert. This is silly because OpConvert means the same thing on
all architectures and is logically a no-op that exists only to keep
track of conversions to and from unsafe.Pointer. Furthermore, lowering
it makes it harder to recognize in other analyses, particularly
liveness analysis.
This CL eliminates the lowering of OpConvert, leaving it as the
generic op until code generation time.
The main complexity here is that we still need to register-allocate
OpConvert operations. Currently, each arch's lowered OpConvert
specifies all GP registers in its register mask. Ideally, OpConvert
wouldn't affect value homing at all, and we could just copy the home
of OpConvert's source, but this can potentially home an OpConvert in a
LocalSlot, which neither regalloc nor stackalloc expect. Rather than
try to disentangle this assumption from regalloc and stackalloc, we
continue to register-allocate OpConvert, but teach regalloc that
OpConvert can be allocated to any allocatable GP register.
For #24543.
Change-Id: I795a6aee5fd94d4444a7bafac3838a400c9f7bb6
Reviewed-on: https://go-review.googlesource.com/108496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2018-04-02 16:08:09 -04:00
|
|
|
// regspec returns the regInfo for operation op.
|
2021-02-13 10:49:37 -05:00
|
|
|
func (s *regAllocState) regspec(v *Value) regInfo {
|
|
|
|
|
op := v.Op
|
2018-04-02 16:08:09 -04:00
|
|
|
if op == OpConvert {
|
|
|
|
|
// OpConvert is a generic op, so it doesn't have a
|
|
|
|
|
// register set in the static table. It can use any
|
|
|
|
|
// allocatable integer register.
|
|
|
|
|
m := s.allocatable & s.f.Config.gpRegMask
|
|
|
|
|
return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}}
|
|
|
|
|
}
|
2021-02-13 10:49:37 -05:00
|
|
|
if op == OpArgIntReg {
|
|
|
|
|
reg := v.Block.Func.Config.intParamRegs[v.AuxInt8()]
|
|
|
|
|
return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
|
|
|
|
|
}
|
|
|
|
|
if op == OpArgFloatReg {
|
|
|
|
|
reg := v.Block.Func.Config.floatParamRegs[v.AuxInt8()]
|
|
|
|
|
return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
|
|
|
|
|
}
|
|
|
|
|
if op.IsCall() {
|
|
|
|
|
if ac, ok := v.Aux.(*AuxCall); ok && ac.reg != nil {
|
2021-02-18 15:50:37 -05:00
|
|
|
return *ac.Reg(&opcodeTable[op].reg, s.f.Config)
|
2021-02-13 10:49:37 -05:00
|
|
|
}
|
|
|
|
|
}
|
2021-02-22 21:51:35 -05:00
|
|
|
if op == OpMakeResult && s.f.OwnAux.reg != nil {
|
|
|
|
|
return *s.f.OwnAux.ResultReg(s.f.Config)
|
|
|
|
|
}
|
2018-04-02 16:08:09 -04:00
|
|
|
return opcodeTable[op].reg
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-25 16:08:13 -04:00
|
|
|
func (s *regAllocState) isGReg(r register) bool {
|
|
|
|
|
return s.f.Config.hasGReg && s.GReg == r
|
|
|
|
|
}
|
|
|
|
|
|
2022-04-05 15:07:29 -07:00
|
|
|
// Dummy value used to represent the value being held in a temporary register.
|
|
|
|
|
var tmpVal Value
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
func (s *regAllocState) regalloc(f *Func) {
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register
|
|
|
|
|
defer f.retSparseSet(regValLiveSet)
|
2015-08-11 12:51:33 -07:00
|
|
|
var oldSched []*Value
|
|
|
|
|
var phis []*Value
|
2015-11-05 14:59:47 -08:00
|
|
|
var phiRegs []register
|
|
|
|
|
var args []*Value
|
2015-08-11 12:51:33 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Data structure used for computing desired registers.
|
|
|
|
|
var desired desiredState
|
2024-11-23 10:58:47 -08:00
|
|
|
desiredSecondReg := map[ID][4]register{} // desired register allocation for 2nd part of a tuple
|
2016-04-15 12:49:30 -07:00
|
|
|
|
|
|
|
|
// Desired registers for inputs & outputs for each instruction in the block.
|
|
|
|
|
type dentry struct {
|
|
|
|
|
out [4]register // desired output registers
|
|
|
|
|
in [3][4]register // desired input registers (for inputs 0,1, and 2)
|
|
|
|
|
}
|
|
|
|
|
var dinfo []dentry
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
if f.Entry != f.Blocks[0] {
|
|
|
|
|
f.Fatalf("entry block must be first")
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-30 16:20:10 -04:00
|
|
|
for _, b := range s.visitOrder {
|
[dev.debug] cmd/compile: better DWARF with optimizations on
Debuggers use DWARF information to find local variables on the
stack and in registers. Prior to this CL, the DWARF information for
functions claimed that all variables were on the stack at all times.
That's incorrect when optimizations are enabled, and results in
debuggers showing data that is out of date or complete gibberish.
After this CL, the compiler is capable of representing variable
locations more accurately, and attempts to do so. Due to limitations of
the SSA backend, it's not possible to be completely correct.
There are a number of problems in the current design. One of the easier
to understand is that variable names currently must be attached to an
SSA value, but not all assignments in the source code actually result
in machine code. For example:
type myint int
var a int
b := myint(a)
and
b := (*uint64)(unsafe.Pointer(a))
don't generate machine code because the underlying representation is the
same, so the correct value of b will not be set when the user would
expect.
Generating the more precise debug information is behind a flag,
dwarflocationlists. Because of the issues described above, setting the
flag may not make the debugging experience much better, and may actually
make it worse in cases where the variable actually is on the stack and
the more complicated analysis doesn't realize it.
A number of changes are included:
- Add a new pseudo-instruction, RegKill, which indicates that the value
in the register has been clobbered.
- Adjust regalloc to emit RegKills in the right places. Significantly,
this means that phis are mixed with StoreReg and RegKills after
regalloc.
- Track variable decomposition in ssa.LocalSlots.
- After the SSA backend is done, analyze the result and build location
lists for each LocalSlot.
- After assembly is done, update the location lists with the assembled
PC offsets, recompose variables, and build DWARF location lists. Emit the
list as a new linker symbol, one per function.
- In the linker, aggregate the location lists into a .debug_loc section.
TODO:
- currently disabled for non-X86/AMD64 because there are no data tables.
go build -toolexec 'toolstash -cmp' -a std succeeds.
With -dwarflocationlists false:
before: f02812195637909ff675782c0b46836a8ff01976
after: 06f61e8112a42ac34fb80e0c818b3cdb84a5e7ec
benchstat -geomean /tmp/220352263 /tmp/621364410
completed 15 of 15, estimated time remaining 0s (eta 3:52PM)
name old time/op new time/op delta
Template 199ms ± 3% 198ms ± 2% ~ (p=0.400 n=15+14)
Unicode 96.6ms ± 5% 96.4ms ± 5% ~ (p=0.838 n=15+15)
GoTypes 653ms ± 2% 647ms ± 2% ~ (p=0.102 n=15+14)
Flate 133ms ± 6% 129ms ± 3% -2.62% (p=0.041 n=15+15)
GoParser 164ms ± 5% 159ms ± 3% -3.05% (p=0.000 n=15+15)
Reflect 428ms ± 4% 422ms ± 3% ~ (p=0.156 n=15+13)
Tar 123ms ±10% 124ms ± 8% ~ (p=0.461 n=15+15)
XML 228ms ± 3% 224ms ± 3% -1.57% (p=0.045 n=15+15)
[Geo mean] 206ms 377ms +82.86%
name old user-time/op new user-time/op delta
Template 292ms ±10% 301ms ±12% ~ (p=0.189 n=15+15)
Unicode 166ms ±37% 158ms ±14% ~ (p=0.418 n=15+14)
GoTypes 962ms ± 6% 963ms ± 7% ~ (p=0.976 n=15+15)
Flate 207ms ±19% 200ms ±14% ~ (p=0.345 n=14+15)
GoParser 246ms ±22% 240ms ±15% ~ (p=0.587 n=15+15)
Reflect 611ms ±13% 587ms ±14% ~ (p=0.085 n=15+13)
Tar 211ms ±12% 217ms ±14% ~ (p=0.355 n=14+15)
XML 335ms ±15% 320ms ±18% ~ (p=0.169 n=15+15)
[Geo mean] 317ms 583ms +83.72%
name old alloc/op new alloc/op delta
Template 40.2MB ± 0% 40.2MB ± 0% -0.15% (p=0.000 n=14+15)
Unicode 29.2MB ± 0% 29.3MB ± 0% ~ (p=0.624 n=15+15)
GoTypes 114MB ± 0% 114MB ± 0% -0.15% (p=0.000 n=15+14)
Flate 25.7MB ± 0% 25.6MB ± 0% -0.18% (p=0.000 n=13+15)
GoParser 32.2MB ± 0% 32.2MB ± 0% -0.14% (p=0.003 n=15+15)
Reflect 77.8MB ± 0% 77.9MB ± 0% ~ (p=0.061 n=15+15)
Tar 27.1MB ± 0% 27.0MB ± 0% -0.11% (p=0.029 n=15+15)
XML 42.7MB ± 0% 42.5MB ± 0% -0.29% (p=0.000 n=15+15)
[Geo mean] 42.1MB 75.0MB +78.05%
name old allocs/op new allocs/op delta
Template 402k ± 1% 398k ± 0% -0.91% (p=0.000 n=15+15)
Unicode 344k ± 1% 344k ± 0% ~ (p=0.715 n=15+14)
GoTypes 1.18M ± 0% 1.17M ± 0% -0.91% (p=0.000 n=15+14)
Flate 243k ± 0% 240k ± 1% -1.05% (p=0.000 n=13+15)
GoParser 327k ± 1% 324k ± 1% -0.96% (p=0.000 n=15+15)
Reflect 984k ± 1% 982k ± 0% ~ (p=0.050 n=15+15)
Tar 261k ± 1% 259k ± 1% -0.77% (p=0.000 n=15+15)
XML 411k ± 0% 404k ± 1% -1.55% (p=0.000 n=15+15)
[Geo mean] 439k 755k +72.01%
name old text-bytes new text-bytes delta
HelloSize 694kB ± 0% 694kB ± 0% -0.00% (p=0.000 n=15+15)
name old data-bytes new data-bytes delta
HelloSize 5.55kB ± 0% 5.55kB ± 0% ~ (all equal)
name old bss-bytes new bss-bytes delta
HelloSize 133kB ± 0% 133kB ± 0% ~ (all equal)
name old exe-bytes new exe-bytes delta
HelloSize 1.04MB ± 0% 1.04MB ± 0% ~ (all equal)
Change-Id: I991fc553ef175db46bb23b2128317bbd48de70d8
Reviewed-on: https://go-review.googlesource.com/41770
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2017-07-21 18:30:19 -04:00
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("Begin processing block %v\n", b)
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
s.curBlock = b
|
cmd/compile/internal/ssa: drop overwritten regalloc basic block input requirements
For the following description, consider the following basic block graph:
b1 ───┐┌──── b2
││
││
▼▼
b3
For register allocator transitions between basic blocks, there are two
key passes (significant paraphrasing):
First, each basic block is visited in some predetermined visit order.
This is the core visitOrder range loop in regAllocState.regalloc. The
specific ordering heuristics aren't important here, except that the
order guarantees that when visiting a basic block at least one of its
predecessors has already been visited.
Upon visiting a basic block, that block sets its expected starting
register state (regAllocState.startRegs) based on the ending register
state (regAlloc.State.endRegs) of one of its predecessors. (How it
chooses which predecessor to use is not important here.)
From that starting state, registers are assigned for all values in the
block, ultimately resulting in some ending register state.
After all blocks have been visited, the shuffle pass
(regAllocState.shuffle) ensures that for each edge, endRegs of the
predecessor == startRegs of the successor. That is, it makes sure that
the startRegs assumptions actually hold true for each edge. It does this
by adding moves to the end of the predecessor block to place values in
the expected register for the successor block. These may be moves from
other registers, or from memory if the value is spilled.
Now on to the actual problem:
Assume that b1 places some value v1 into register R10, and thus ends
with endRegs containing R10 = v1.
When b3 is visited, it selects b1 as its model predecessor and sets
startRegs with R10 = v1.
b2 does not have v1 in R10, so later in the shuffle pass, we will add a
move of v1 into R10 to the end of b2 to ensure it is available for b3.
This is all perfectly fine and exactly how things should work.
Now suppose that b3 does not use v1. It does need to use some other
value v2, which is not currently in a register. When assigning v2 to a
register, it finds all registers are already in use and it needs to dump
a value. Ultimately, it decides to dump v1 from R10 and replace it with
v2.
This is fine, but it has downstream effects on shuffle in b2. b3's
startRegs still state that R10 = v1, so b2 will add a move to R10 even
though b3 will unconditionally overwrite it. i.e., the move at the end
of b2 is completely useless and can result in code like:
// end of b2
MOV n(SP), R10 // R10 = v1 <-- useless
// start of b3
MOV m(SP), R10 // R10 = v2
This is precisely what happened in #58298.
This CL addresses this problem by dropping registers from startRegs if
they are never used in the basic block prior to getting dumped. This
allows the shuffle pass to avoid placing those useless values into the
register.
There is a significant limitation to this CL, which is that it only
impacts the immediate predecessors of an overwriting block. We can
discuss this by zooming out a bit on the previous graph:
b4 ───┐┌──── b5
││
││
▼▼
b1 ───┐┌──── b2
││
││
▼▼
b3
Here we have the same graph, except we can see the two predecessors of
b1.
Now suppose that rather than b1 assigning R10 = v1 as above, the
assignment is done in b4. b1 has startRegs R10 = v1, doesn't use the
value at all, and simply passes it through to endRegs R10 = v1.
Now the shuffle pass will require both b2 and b5 to add a move to
assigned R10 = v1, because that is specified in their successor
startRegs.
With this CL, b3 drops R10 = v1 from startRegs, but there is no
backwards propagation, so b1 still has R10 = v1 in startRegs, and b5
still needs to add a useless move.
Extending this CL with such propagation may significantly increase the
number of useless moves we can remove, though it will add complexity to
maintenance and could potentially impact build performance depending on
how efficiently we could implement the propagation (something I haven't
considered carefully).
As-is, this optimization does not impact much code. In bent .text size
geomean is -0.02%. In the container/heap test binary, 18 of ~2500
functions are impacted by this CL. Bent and sweet do not show a
noticeable performance impact one way or another, however #58298 does
show a case where this can have impact if the useless instructions end
up in the hot path of a tight loop.
For #58298.
Change-Id: I2fcef37c955159d068fa0725f995a1848add8a5f
Reviewed-on: https://go-review.googlesource.com/c/go/+/471158
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
2023-02-21 13:20:49 -05:00
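A standalone toy of the pruning idea from the change described above: an incoming register assignment is worth keeping in startRegs only if the block reads that register before (or without) overwriting it. The event type and prune function are hypothetical stand-ins, not the real startRegsMask/usedSinceBlockStart bookkeeping used below.

package main

import "fmt"

// event is a toy record of what a block does to a register: either read the
// incoming value or overwrite the register with something new.
type event struct {
	reg  int
	read bool
}

// prune keeps an incoming register only if it is read before being
// overwritten, or never overwritten at all (it may be live through the block).
func prune(startRegs []int, body []event) []int {
	readFirst := map[int]bool{}
	clobbered := map[int]bool{}
	for _, e := range body {
		switch {
		case e.read && !clobbered[e.reg]:
			readFirst[e.reg] = true
		case !e.read:
			clobbered[e.reg] = true
		}
	}
	kept := []int{}
	for _, r := range startRegs {
		if readFirst[r] || !clobbered[r] {
			kept = append(kept, r)
		}
	}
	return kept
}

func main() {
	// R10 holds v1 on entry but is overwritten before any read, so the
	// predecessor need not shuffle v1 into R10; R11 is read, so it stays.
	body := []event{{reg: 10, read: false}, {reg: 11, read: true}}
	fmt.Println(prune([]int{10, 11}, body)) // [11]
}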
|
|
|
s.startRegsMask = 0
|
|
|
|
|
s.usedSinceBlockStart = 0
|
2024-11-23 10:58:47 -08:00
|
|
|
clear(desiredSecondReg)
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2017-03-22 21:34:12 -04:00
|
|
|
// Initialize regValLiveSet and uses fields for this block.
|
2015-11-05 14:59:47 -08:00
|
|
|
// Walk backwards through the block doing liveness analysis.
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.clear()
|
2015-11-05 14:59:47 -08:00
|
|
|
for _, e := range s.live[b.ID] {
|
2016-12-08 13:49:51 -08:00
|
|
|
s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.add(e.ID)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2019-08-12 20:19:58 +01:00
|
|
|
for _, v := range b.ControlValues() {
|
|
|
|
|
if s.values[v.ID].needReg {
|
|
|
|
|
s.addUse(v.ID, int32(len(b.Values)), b.Pos) // pseudo-use by control values
|
|
|
|
|
regValLiveSet.add(v.ID)
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2023-07-12 15:31:25 -07:00
|
|
|
if len(s.nextCall) < len(b.Values) {
|
|
|
|
|
s.nextCall = append(s.nextCall, make([]int32, len(b.Values)-len(s.nextCall))...)
|
|
|
|
|
}
|
|
|
|
|
var nextCall int32 = math.MaxInt32
|
2015-11-05 14:59:47 -08:00
|
|
|
for i := len(b.Values) - 1; i >= 0; i-- {
|
|
|
|
|
v := b.Values[i]
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.remove(v.ID)
|
2015-11-05 14:59:47 -08:00
|
|
|
if v.Op == OpPhi {
|
2015-12-17 10:01:24 -08:00
|
|
|
// Remove v from the live set, but don't add
|
2016-03-01 23:21:55 +00:00
|
|
|
// any inputs. This is the state the len(b.Preds)>1
|
2015-12-17 10:01:24 -08:00
|
|
|
// case below desires; it wants to process phis specially.
|
2023-07-12 15:31:25 -07:00
|
|
|
s.nextCall[i] = nextCall
|
2015-12-17 10:01:24 -08:00
|
|
|
continue
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2017-03-22 21:34:12 -04:00
|
|
|
if opcodeTable[v.Op].call {
|
|
|
|
|
// Function call clobbers all the registers but SP and SB.
|
|
|
|
|
regValLiveSet.clear()
|
|
|
|
|
if s.sp != 0 && s.values[s.sp].uses != nil {
|
|
|
|
|
regValLiveSet.add(s.sp)
|
|
|
|
|
}
|
|
|
|
|
if s.sb != 0 && s.values[s.sb].uses != nil {
|
|
|
|
|
regValLiveSet.add(s.sb)
|
|
|
|
|
}
|
2023-07-12 15:31:25 -07:00
|
|
|
nextCall = int32(i)
|
2017-03-22 21:34:12 -04:00
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
for _, a := range v.Args {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !s.values[a.ID].needReg {
|
2015-11-05 14:59:47 -08:00
|
|
|
continue
|
|
|
|
|
}
|
2016-12-07 18:14:35 -08:00
|
|
|
s.addUse(a.ID, int32(i), v.Pos)
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.add(a.ID)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2023-07-12 15:31:25 -07:00
|
|
|
s.nextCall[i] = nextCall
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2018-04-17 09:09:07 -07:00
|
|
|
fmt.Printf("use distances for %s\n", b)
|
2015-11-05 14:59:47 -08:00
|
|
|
for i := range s.values {
|
|
|
|
|
vi := &s.values[i]
|
|
|
|
|
u := vi.uses
|
|
|
|
|
if u == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf(" v%d:", i)
|
2015-11-05 14:59:47 -08:00
|
|
|
for u != nil {
|
|
|
|
|
fmt.Printf(" %d", u.dist)
|
|
|
|
|
u = u.next
|
|
|
|
|
}
|
|
|
|
|
fmt.Println()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// Make a copy of the block schedule so we can generate a new one in place.
|
|
|
|
|
// We make a separate copy for phis and regular values.
|
|
|
|
|
nphi := 0
|
|
|
|
|
for _, v := range b.Values {
|
|
|
|
|
if v.Op != OpPhi {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
nphi++
|
|
|
|
|
}
|
|
|
|
|
phis = append(phis[:0], b.Values[:nphi]...)
|
|
|
|
|
oldSched = append(oldSched[:0], b.Values[nphi:]...)
|
2015-05-05 16:19:12 -07:00
|
|
|
b.Values = b.Values[:0]
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// Initialize start state of block.
|
|
|
|
|
if b == f.Entry {
|
|
|
|
|
// Regalloc state is empty to start.
|
|
|
|
|
if nphi > 0 {
|
|
|
|
|
f.Fatalf("phis in entry block")
|
2015-06-06 16:03:33 -07:00
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
} else if len(b.Preds) == 1 {
|
|
|
|
|
// Start regalloc state with the end state of the previous block.
|
2016-04-28 16:52:47 -07:00
|
|
|
s.setState(s.endRegs[b.Preds[0].b.ID])
|
2015-08-11 12:51:33 -07:00
|
|
|
if nphi > 0 {
|
|
|
|
|
f.Fatalf("phis in single-predecessor block")
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
// Drop any values which are no longer live.
|
|
|
|
|
// This may happen because at the end of p, a value may be
|
|
|
|
|
// live but only used by some other successor of p.
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2015-11-05 14:59:47 -08:00
|
|
|
v := s.regs[r].v
|
2017-03-22 21:34:12 -04:00
|
|
|
if v != nil && !regValLiveSet.contains(v.ID) {
|
2015-11-05 14:59:47 -08:00
|
|
|
s.freeReg(r)
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
} else {
|
2016-03-01 23:21:55 +00:00
|
|
|
// This is the complicated case. We have more than one predecessor,
|
2015-08-11 12:51:33 -07:00
|
|
|
// which means we may have Phi ops.
|
|
|
|
|
|
2020-07-23 10:24:56 +08:00
|
|
|
// Start with the final register state of the predecessor with the fewest spill values.
|
|
|
|
|
// This is based on the following points:
|
|
|
|
|
// 1, Fewer spill values indicate that the register pressure of this path is smaller,
|
|
|
|
|
// so the values of this block are more likely to be allocated to registers.
|
|
|
|
|
// 2, Avoid the predecessor that contains the function call, because the predecessor that
|
|
|
|
|
// contains the function call usually generates a lot of spills and loses the previous
|
|
|
|
|
// allocation state.
|
|
|
|
|
// TODO: Improve this part. At least the size of endRegs of the predecessor also has
|
|
|
|
|
// an impact on the code size and compiler speed. But it is not easy to find a simple
|
|
|
|
|
// and efficient method that combines multiple factors.
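// For example (an illustrative CFG, not taken from any particular function):
// suppose b has two already-visited predecessors, p1 with spillLive = {v5, v9}
// and p2 with spillLive = {v5}. p2 has fewer live spills, so its endRegs are
// used as this block's starting state; p1's extra spill suggests higher
// register pressure (often a call) along that path.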
|
|
|
|
|
idx := -1
|
|
|
|
|
for i, p := range b.Preds {
|
|
|
|
|
// If the predecessor has not been visited yet, skip it because its end state
|
|
|
|
|
// (endRegs and spillLive) has not been computed yet.
|
|
|
|
|
pb := p.b
|
|
|
|
|
if s.blockOrder[pb.ID] >= s.blockOrder[b.ID] {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if idx == -1 {
|
|
|
|
|
idx = i
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
pSel := b.Preds[idx].b
|
|
|
|
|
if len(s.spillLive[pb.ID]) < len(s.spillLive[pSel.ID]) {
|
|
|
|
|
idx = i
|
|
|
|
|
} else if len(s.spillLive[pb.ID]) == len(s.spillLive[pSel.ID]) {
|
|
|
|
|
// Use a bit of likely-branch information. After the critical pass, pb and pSel must
|
|
|
|
|
// be plain blocks, so check edge pb->pb.Preds instead of edge pb->b.
|
|
|
|
|
// TODO: improve the prediction of the likely predecessor. The following
|
|
|
|
|
// method is only suitable for the simplest cases. For complex cases,
|
|
|
|
|
// the prediction may be inaccurate, but this does not affect the
|
|
|
|
|
// correctness of the program.
|
|
|
|
|
// According to the layout algorithm, the predecessor with the
|
|
|
|
|
// smaller blockOrder is the true branch, and the test results show
|
|
|
|
|
// that it is better to choose the predecessor with a smaller
|
|
|
|
|
// blockOrder than to make no choice at all.
|
|
|
|
|
if pb.likelyBranch() && !pSel.likelyBranch() || s.blockOrder[pb.ID] < s.blockOrder[pSel.ID] {
|
|
|
|
|
idx = i
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
if idx < 0 {
|
2020-07-23 10:24:56 +08:00
|
|
|
f.Fatalf("bad visitOrder, no predecessor of %s has been visited before it", b)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2016-04-28 16:52:47 -07:00
|
|
|
p := b.Preds[idx].b
|
2015-12-17 10:01:24 -08:00
|
|
|
s.setState(s.endRegs[p.ID])
|
|
|
|
|
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
|
|
|
|
|
for _, x := range s.endRegs[p.ID] {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// Decide on registers for phi ops. Use the registers determined
|
2015-08-11 12:51:33 -07:00
|
|
|
// by the primary predecessor if we can.
|
|
|
|
|
// TODO: pick best of (already processed) predecessors?
|
2017-08-19 22:33:51 +02:00
|
|
|
// Majority vote? Deepest nesting level?
|
2015-08-11 12:51:33 -07:00
|
|
|
phiRegs = phiRegs[:0]
|
2015-12-17 10:01:24 -08:00
|
|
|
var phiUsed regMask
|
2018-05-25 16:08:13 -04:00
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
for _, v := range phis {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !s.values[v.ID].needReg {
|
2015-08-11 12:51:33 -07:00
|
|
|
phiRegs = append(phiRegs, noRegister)
|
|
|
|
|
continue
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
a := v.Args[idx]
|
2016-09-16 15:02:47 -07:00
|
|
|
// Some instructions target not-allocatable registers.
|
|
|
|
|
// They're not suitable for further (phi-function) allocation.
|
|
|
|
|
m := s.values[a.ID].regs &^ phiUsed & s.allocatable
|
2015-08-11 12:51:33 -07:00
|
|
|
if m != 0 {
|
2016-03-24 20:57:53 +11:00
|
|
|
r := pickReg(m)
|
2015-12-17 10:01:24 -08:00
|
|
|
phiUsed |= regMask(1) << r
|
|
|
|
|
phiRegs = append(phiRegs, r)
|
2015-08-11 12:51:33 -07:00
|
|
|
} else {
|
2015-12-17 10:01:24 -08:00
|
|
|
phiRegs = append(phiRegs, noRegister)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-14 15:46:26 +01:00
|
|
|
// Second pass - deallocate all in-register phi inputs.
|
2016-10-28 23:11:04 -04:00
|
|
|
for i, v := range phis {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !s.values[v.ID].needReg {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
a := v.Args[idx]
|
2020-04-14 15:46:26 +01:00
|
|
|
r := phiRegs[i]
|
|
|
|
|
if r == noRegister {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if regValLiveSet.contains(a.ID) {
|
|
|
|
|
// Input value is still live (it is used by something other than Phi).
|
2016-10-28 23:11:04 -04:00
|
|
|
// Try to move it around before kicking out, if there is a free register.
|
|
|
|
|
// We generate a Copy in the predecessor block and record it. It will be
|
2020-04-14 15:46:26 +01:00
|
|
|
// deleted later if never used.
|
|
|
|
|
//
|
2016-10-28 23:11:04 -04:00
|
|
|
// Pick a free register. At this point some registers used in the predecessor
|
|
|
|
|
// block may have been deallocated. Those are the ones used for Phis. Exclude
|
|
|
|
|
// them (and they are not going to be helpful anyway).
|
|
|
|
|
m := s.compatRegs(a.Type) &^ s.used &^ phiUsed
|
|
|
|
|
if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 {
|
|
|
|
|
r2 := pickReg(m)
|
2016-12-07 18:14:35 -08:00
|
|
|
c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c)
|
2016-10-28 23:11:04 -04:00
|
|
|
s.copies[c] = false
|
|
|
|
|
if s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2])
|
2016-10-28 23:11:04 -04:00
|
|
|
}
|
|
|
|
|
s.setOrig(c, a)
|
|
|
|
|
s.assignReg(r2, a, c)
|
|
|
|
|
s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c})
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2020-04-14 15:46:26 +01:00
|
|
|
s.freeReg(r)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
|
[dev.debug] cmd/compile: better DWARF with optimizations on
Debuggers use DWARF information to find local variables on the
stack and in registers. Prior to this CL, the DWARF information for
functions claimed that all variables were on the stack at all times.
That's incorrect when optimizations are enabled, and results in
debuggers showing data that is out of date or complete gibberish.
After this CL, the compiler is capable of representing variable
locations more accurately, and attempts to do so. Due to limitations of
the SSA backend, it's not possible to be completely correct.
There are a number of problems in the current design. One of the easier
to understand is that variable names currently must be attached to an
SSA value, but not all assignments in the source code actually result
in machine code. For example:
type myint int
var a int
b := myint(int)
and
b := (*uint64)(unsafe.Pointer(a))
don't generate machine code because the underlying representation is the
same, so the correct value of b will not be set when the user would
expect.
Generating the more precise debug information is behind a flag,
dwarflocationlists. Because of the issues described above, setting the
flag may not make the debugging experience much better, and may actually
make it worse in cases where the variable actually is on the stack and
the more complicated analysis doesn't realize it.
A number of changes are included:
- Add a new pseudo-instruction, RegKill, which indicates that the value
in the register has been clobbered.
- Adjust regalloc to emit RegKills in the right places. Significantly,
this means that phis are mixed with StoreReg and RegKills after
regalloc.
- Track variable decomposition in ssa.LocalSlots.
- After the SSA backend is done, analyze the result and build location
lists for each LocalSlot.
- After assembly is done, update the location lists with the assembled
PC offsets, recompose variables, and build DWARF location lists. Emit the
list as a new linker symbol, one per function.
- In the linker, aggregate the location lists into a .debug_loc section.
TODO:
- currently disabled for non-X86/AMD64 because there are no data tables.
go build -toolexec 'toolstash -cmp' -a std succeeds.
With -dwarflocationlists false:
before: f02812195637909ff675782c0b46836a8ff01976
after: 06f61e8112a42ac34fb80e0c818b3cdb84a5e7ec
benchstat -geomean /tmp/220352263 /tmp/621364410
completed 15 of 15, estimated time remaining 0s (eta 3:52PM)
name old time/op new time/op delta
Template 199ms ± 3% 198ms ± 2% ~ (p=0.400 n=15+14)
Unicode 96.6ms ± 5% 96.4ms ± 5% ~ (p=0.838 n=15+15)
GoTypes 653ms ± 2% 647ms ± 2% ~ (p=0.102 n=15+14)
Flate 133ms ± 6% 129ms ± 3% -2.62% (p=0.041 n=15+15)
GoParser 164ms ± 5% 159ms ± 3% -3.05% (p=0.000 n=15+15)
Reflect 428ms ± 4% 422ms ± 3% ~ (p=0.156 n=15+13)
Tar 123ms ±10% 124ms ± 8% ~ (p=0.461 n=15+15)
XML 228ms ± 3% 224ms ± 3% -1.57% (p=0.045 n=15+15)
[Geo mean] 206ms 377ms +82.86%
name old user-time/op new user-time/op delta
Template 292ms ±10% 301ms ±12% ~ (p=0.189 n=15+15)
Unicode 166ms ±37% 158ms ±14% ~ (p=0.418 n=15+14)
GoTypes 962ms ± 6% 963ms ± 7% ~ (p=0.976 n=15+15)
Flate 207ms ±19% 200ms ±14% ~ (p=0.345 n=14+15)
GoParser 246ms ±22% 240ms ±15% ~ (p=0.587 n=15+15)
Reflect 611ms ±13% 587ms ±14% ~ (p=0.085 n=15+13)
Tar 211ms ±12% 217ms ±14% ~ (p=0.355 n=14+15)
XML 335ms ±15% 320ms ±18% ~ (p=0.169 n=15+15)
[Geo mean] 317ms 583ms +83.72%
name old alloc/op new alloc/op delta
Template 40.2MB ± 0% 40.2MB ± 0% -0.15% (p=0.000 n=14+15)
Unicode 29.2MB ± 0% 29.3MB ± 0% ~ (p=0.624 n=15+15)
GoTypes 114MB ± 0% 114MB ± 0% -0.15% (p=0.000 n=15+14)
Flate 25.7MB ± 0% 25.6MB ± 0% -0.18% (p=0.000 n=13+15)
GoParser 32.2MB ± 0% 32.2MB ± 0% -0.14% (p=0.003 n=15+15)
Reflect 77.8MB ± 0% 77.9MB ± 0% ~ (p=0.061 n=15+15)
Tar 27.1MB ± 0% 27.0MB ± 0% -0.11% (p=0.029 n=15+15)
XML 42.7MB ± 0% 42.5MB ± 0% -0.29% (p=0.000 n=15+15)
[Geo mean] 42.1MB 75.0MB +78.05%
name old allocs/op new allocs/op delta
Template 402k ± 1% 398k ± 0% -0.91% (p=0.000 n=15+15)
Unicode 344k ± 1% 344k ± 0% ~ (p=0.715 n=15+14)
GoTypes 1.18M ± 0% 1.17M ± 0% -0.91% (p=0.000 n=15+14)
Flate 243k ± 0% 240k ± 1% -1.05% (p=0.000 n=13+15)
GoParser 327k ± 1% 324k ± 1% -0.96% (p=0.000 n=15+15)
Reflect 984k ± 1% 982k ± 0% ~ (p=0.050 n=15+15)
Tar 261k ± 1% 259k ± 1% -0.77% (p=0.000 n=15+15)
XML 411k ± 0% 404k ± 1% -1.55% (p=0.000 n=15+15)
[Geo mean] 439k 755k +72.01%
name old text-bytes new text-bytes delta
HelloSize 694kB ± 0% 694kB ± 0% -0.00% (p=0.000 n=15+15)
name old data-bytes new data-bytes delta
HelloSize 5.55kB ± 0% 5.55kB ± 0% ~ (all equal)
name old bss-bytes new bss-bytes delta
HelloSize 133kB ± 0% 133kB ± 0% ~ (all equal)
name old exe-bytes new exe-bytes delta
HelloSize 1.04MB ± 0% 1.04MB ± 0% ~ (all equal)
Change-Id: I991fc553ef175db46bb23b2128317bbd48de70d8
Reviewed-on: https://go-review.googlesource.com/41770
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2017-07-21 18:30:19 -04:00
|
|
|
// Copy phi ops into new schedule.
|
|
|
|
|
b.Values = append(b.Values, phis...)
|
|
|
|
|
|
2020-10-19 11:20:24 +08:00
|
|
|
// Third pass - pick registers for phis whose input
|
|
|
|
|
// was not in a register in the primary predecessor.
|
2015-08-11 12:51:33 -07:00
|
|
|
for i, v := range phis {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !s.values[v.ID].needReg {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if phiRegs[i] != noRegister {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
|
2020-10-19 11:20:24 +08:00
|
|
|
// If one of the other inputs of v is in a register, and the register is available,
|
|
|
|
|
// select this register, which can save some unnecessary copies.
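// For example (illustrative): if v = Phi(a0, a1) and a1 is left in R5 at the
// end of its (non-primary) predecessor, and R5 is compatible and free here,
// restricting the mask to R5 means the merge edge from that predecessor
// needs no extra move for a1.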
|
|
|
|
|
for i, pe := range b.Preds {
|
2020-07-23 10:24:56 +08:00
|
|
|
if i == idx {
|
2020-10-19 11:20:24 +08:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
ri := noRegister
|
|
|
|
|
for _, er := range s.endRegs[pe.b.ID] {
|
|
|
|
|
if er.v == s.orig[v.Args[i].ID] {
|
|
|
|
|
ri = er.r
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if ri != noRegister && m>>ri&1 != 0 {
|
|
|
|
|
m = regMask(1) << ri
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
if m != 0 {
|
|
|
|
|
r := pickReg(m)
|
|
|
|
|
phiRegs[i] = r
|
|
|
|
|
phiUsed |= regMask(1) << r
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// Set registers for phis. Add phi spill code.
|
2015-12-17 10:01:24 -08:00
|
|
|
for i, v := range phis {
|
|
|
|
|
if !s.values[v.ID].needReg {
|
2015-08-11 12:51:33 -07:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
r := phiRegs[i]
|
|
|
|
|
if r == noRegister {
|
2015-12-17 10:01:24 -08:00
|
|
|
// stack-based phi
|
|
|
|
|
// Spills will be inserted in all the predecessors below.
|
2017-03-07 14:45:46 -05:00
|
|
|
s.values[v.ID].spill = v // v starts life spilled
|
2015-12-17 10:01:24 -08:00
|
|
|
continue
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
// register-based phi
|
|
|
|
|
s.assignReg(r, v, v)
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-22 21:34:12 -04:00
|
|
|
// Deallocate any values which are no longer live. Phis are excluded.
|
|
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
|
|
|
|
if phiUsed>>r&1 != 0 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
v := s.regs[r].v
|
|
|
|
|
if v != nil && !regValLiveSet.contains(v.ID) {
|
|
|
|
|
s.freeReg(r)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
// Save the starting state for use by merge edges.
|
2018-03-27 13:24:45 +02:00
|
|
|
// We append to a stack allocated variable that we'll
|
|
|
|
|
// later copy into s.startRegs in one fell swoop, to save
|
|
|
|
|
// on allocations.
|
|
|
|
|
regList := make([]startReg, 0, 32)
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2015-12-17 10:01:24 -08:00
|
|
|
v := s.regs[r].v
|
|
|
|
|
if v == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if phiUsed>>r&1 != 0 {
|
|
|
|
|
// Skip registers that phis used; we'll handle those
|
|
|
|
|
// specially during merge edge processing.
|
|
|
|
|
continue
|
|
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
regList = append(regList, startReg{r, v, s.regs[r].c, s.values[v.ID].uses.pos})
|
cmd/compile/internal/ssa: drop overwritten regalloc basic block input requirements
For the following description, consider the following basic block graph:
b1 ───┐┌──── b2
││
││
▼▼
b3
For register allocator transitions between basic blocks, there are two
key passes (significant paraphrasing):
First, each basic block is visited in some predetermined visit order.
This is the core visitOrder range loop in regAllocState.regalloc. The
specific ordering heuristics aren't important here, except that the
order guarantees that when visiting a basic block at least one of its
predecessors has already been visited.
Upon visiting a basic block, that block sets its expected starting
register state (regAllocState.startRegs) based on the ending register
state (regAlloc.State.endRegs) of one of its predecessors. (How it
chooses which predecessor to use is not important here.)
From that starting state, registers are assigned for all values in the
block, ultimately resulting in some ending register state.
After all blocks have been visited, the shuffle pass
(regAllocState.shuffle) ensures that for each edge, endRegs of the
predecessor == startRegs of the successor. That is, it makes sure that
the startRegs assumptions actually hold true for each edge. It does this
by adding moves to the end of the predecessor block to place values in
the expected register for the successor block. These may be moves from
other registers, or from memory if the value is spilled.
Now on to the actual problem:
Assume that b1 places some value v1 into register R10, and thus ends
with endRegs containing R10 = v1.
When b3 is visited, it selects b1 as its model predecessor and sets
startRegs with R10 = v1.
b2 does not have v1 in R10, so later in the shuffle pass, we will add a
move of v1 into R10 to the end of b2 to ensure it is available for b3.
This is all perfectly fine and exactly how things should work.
Now suppose that b3 does not use v1. It does need to use some other
value v2, which is not currently in a register. When assigning v2 to a
register, it finds all registers are already in use and it needs to dump
a value. Ultimately, it decides to dump v1 from R10 and replace it with
v2.
This is fine, but it has downstream effects on shuffle in b2. b3's
startRegs still state that R10 = v1, so b2 will add a move to R10 even
though b3 will unconditionally overwrite it. i.e., the move at the end
of b2 is completely useless and can result in code like:
// end of b2
MOV n(SP), R10 // R10 = v1 <-- useless
// start of b3
MOV m(SP), R10 // R10 = v2
This is precisely what happened in #58298.
This CL addresses this problem by dropping registers from startRegs if
they are never used in the basic block prior to getting dumped. This
allows the shuffle pass to avoid placing those useless values into the
register.
There is a significant limitation to this CL, which is that it only
impacts the immediate predecessors of an overwriting block. We can
discuss this by zooming out a bit on the previous graph:
b4 ───┐┌──── b5
││
││
▼▼
b1 ───┐┌──── b2
││
││
▼▼
b3
Here we have the same graph, except we can see the two predecessors of
b1.
Now suppose that rather than b1 assigning R10 = v1 as above, the
assignment is done in b4. b1 has startRegs R10 = v1, doesn't use the
value at all, and simply passes it through to endRegs R10 = v1.
Now the shuffle pass will require both b2 and b5 to add a move to
assigned R10 = v1, because that is specified in their successor
startRegs.
With this CL, b3 drops R10 = v1 from startRegs, but there is no
backwards propagation, so b1 still has R10 = v1 in startRegs, and b5
still needs to add a useless move.
Extending this CL with such propagation may significantly increase the
number of useless moves we can remove, though it will add complexity to
maintenance and could potentially impact build performance depending on
how efficiently we could implement the propagation (something I haven't
considered carefully).
As-is, this optimization does not impact much code. In bent .text size
geomean is -0.02%. In the container/heap test binary, 18 of ~2500
functions are impacted by this CL. Bent and sweet do not show a
noticeable performance impact one way or another, however #58298 does
show a case where this can have impact if the useless instructions end
up in the hot path of a tight loop.
For #58298.
Change-Id: I2fcef37c955159d068fa0725f995a1848add8a5f
Reviewed-on: https://go-review.googlesource.com/c/go/+/471158
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
2023-02-21 13:20:49 -05:00
|
|
|
s.startRegsMask |= regMask(1) << r
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2018-03-27 13:24:45 +02:00
|
|
|
s.startRegs[b.ID] = make([]startReg, len(regList))
|
|
|
|
|
copy(s.startRegs[b.ID], regList)
|
2015-12-17 10:01:24 -08:00
|
|
|
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("after phis\n")
|
|
|
|
|
for _, x := range s.startRegs[b.ID] {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-07-12 15:31:25 -07:00
|
|
|
// Drop phis from registers if they immediately go dead.
|
|
|
|
|
for i, v := range phis {
|
|
|
|
|
s.curIdx = i
|
|
|
|
|
s.dropIfUnused(v)
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Allocate space to record the desired registers for each value.
|
2018-03-24 19:03:54 +01:00
|
|
|
if l := len(oldSched); cap(dinfo) < l {
|
|
|
|
|
dinfo = make([]dentry, l)
|
|
|
|
|
} else {
|
|
|
|
|
dinfo = dinfo[:l]
|
2025-04-17 07:49:35 +00:00
|
|
|
clear(dinfo)
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Load static desired register info at the end of the block.
|
|
|
|
|
desired.copy(&s.desired[b.ID])
|
|
|
|
|
|
|
|
|
|
// Check actual assigned registers at the start of the next block(s).
|
|
|
|
|
// Dynamically assigned registers will trump the static
|
|
|
|
|
// desired registers computed during liveness analysis.
|
2016-01-18 20:00:15 -08:00
|
|
|
// Note that we do this phase after startRegs is set above, so that
|
|
|
|
|
// we get the right behavior for a block which branches to itself.
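// For example (illustrative): if a successor's startRegs require some value w
// to be in R3, recording R3 as desired for w here makes it more likely that w
// already ends this block in R3, so less fixup code is needed on the edge into
// that successor.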
|
2016-04-28 16:52:47 -07:00
|
|
|
for _, e := range b.Succs {
|
|
|
|
|
succ := e.b
|
2016-04-15 12:49:30 -07:00
|
|
|
// TODO: prioritize likely successor?
|
2016-01-18 20:00:15 -08:00
|
|
|
for _, x := range s.startRegs[succ.ID] {
|
2017-03-07 14:45:46 -05:00
|
|
|
desired.add(x.v.ID, x.r)
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
// Process phi ops in succ.
|
2016-04-28 16:52:47 -07:00
|
|
|
pidx := e.i
|
2016-01-18 20:00:15 -08:00
|
|
|
for _, v := range succ.Values {
|
|
|
|
|
if v.Op != OpPhi {
|
cmd/compile: reimplement location list generation
Completely redesign and reimplement location list generation to be more
efficient, and hopefully not too hard to understand.
RegKills are gone. Instead of using the regalloc's liveness
calculations, redo them using the Ops' clobber information. Besides
saving a lot of Values, this avoids adding RegKills to blocks that would
be empty otherwise, which was messing up optimizations. This does mean
that it's much harder to tell whether the generation process is buggy
(there's nothing to cross-check it with), and there may be disagreements
with GC liveness. But the performance gain is significant, and it's nice
not to be messing with earlier compiler phases.
The intermediate representations are gone. Instead of producing
ssa.BlockDebugs, then dwarf.LocationLists, and then finally real
location lists, go directly from the SSA to a (mostly) real location
list. Because the SSA analysis happens before assembly, it stores
encoded block/value IDs where PCs would normally go. It would be easier
to do the SSA analysis after assembly, but I didn't want to retain the
SSA just for that.
Generation proceeds in two phases: first, it traverses the function in
CFG order, storing the state of the block at the beginning and end. End
states are used to produce the start states of the successor blocks. In
the second phase, it traverses in program text order and produces the
location lists. The processing in the second phase is redundant, but
much cheaper than storing the intermediate representation. It might be
possible to combine the two phases somewhat to take advantage of cases
where the CFG matches the block layout, but I haven't tried.
Location lists are finalized by adding a base address selection entry,
translating each encoded block/value ID to a real PC, and adding the
terminating zero entry. This probably won't work on OSX, where dsymutil
will choke on the base address selection. I tried emitting CU-relative
relocations for each address, and it was *very* bad for performance --
it uses more memory storing all the relocations than it does for the
actual location list bytes. I think I'm going to end up synthesizing the
relocations in the linker only on OSX, but TBD.
TestNexting needs updating: with more optimizations working, the
debugger doesn't stop on the continue (line 88) any more, and the test's
duplicate suppression kicks in. Also, dx and dy live a little longer
now, but they have the correct values.
Change-Id: Ie772dfe23a4e389ca573624fac4d05401ae32307
Reviewed-on: https://go-review.googlesource.com/89356
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2017-10-26 15:40:17 -04:00
|
|
|
break
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
|
|
|
|
if !s.values[v.ID].needReg {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
rp, ok := s.f.getHome(v.ID).(*Register)
|
2016-01-18 20:00:15 -08:00
|
|
|
if !ok {
|
2020-10-19 11:20:24 +08:00
|
|
|
// If v is not assigned a register, pick a register assigned to one of v's inputs.
|
|
|
|
|
// Hopefully v will get assigned that register later.
|
|
|
|
|
// If the inputs have allocated register information, add it to desired,
|
|
|
|
|
// which may reduce spill or copy operations when the register is available.
|
|
|
|
|
for _, a := range v.Args {
|
|
|
|
|
rp, ok = s.f.getHome(a.ID).(*Register)
|
|
|
|
|
if ok {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if !ok {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
desired.add(v.Args[pidx].ID, register(rp.num))
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
// Walk values backwards computing desired register info.
|
|
|
|
|
// See computeLive for more comments.
|
2016-01-18 20:00:15 -08:00
|
|
|
for i := len(oldSched) - 1; i >= 0; i-- {
|
|
|
|
|
v := oldSched[i]
|
2016-04-15 12:49:30 -07:00
|
|
|
prefs := desired.remove(v.ID)
|
2021-02-13 10:49:37 -05:00
|
|
|
regspec := s.regspec(v)
|
cmd/compile: don't lower OpConvert
Currently, each architecture lowers OpConvert to an arch-specific
OpXXXconvert. This is silly because OpConvert means the same thing on
all architectures and is logically a no-op that exists only to keep
track of conversions to and from unsafe.Pointer. Furthermore, lowering
it makes it harder to recognize in other analyses, particularly
liveness analysis.
This CL eliminates the lowering of OpConvert, leaving it as the
generic op until code generation time.
The main complexity here is that we still need to register-allocate
OpConvert operations. Currently, each arch's lowered OpConvert
specifies all GP registers in its register mask. Ideally, OpConvert
wouldn't affect value homing at all, and we could just copy the home
of OpConvert's source, but this can potentially home an OpConvert in a
LocalSlot, which neither regalloc nor stackalloc expect. Rather than
try to disentangle this assumption from regalloc and stackalloc, we
continue to register-allocate OpConvert, but teach regalloc that
OpConvert can be allocated to any allocatable GP register.
For #24543.
Change-Id: I795a6aee5fd94d4444a7bafac3838a400c9f7bb6
Reviewed-on: https://go-review.googlesource.com/108496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2018-04-02 16:08:09 -04:00
|
|
|
desired.clobber(regspec.clobbers)
|
|
|
|
|
for _, j := range regspec.inputs {
|
2016-04-15 12:49:30 -07:00
|
|
|
if countRegs(j.regs) != 1 {
|
|
|
|
|
continue
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
desired.clobber(j.regs)
|
|
|
|
|
desired.add(v.Args[j.idx].ID, pickReg(j.regs))
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2022-03-26 10:05:04 -07:00
|
|
|
if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 {
|
2016-04-15 12:49:30 -07:00
|
|
|
if opcodeTable[v.Op].commutative {
|
|
|
|
|
desired.addList(v.Args[1].ID, prefs)
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
desired.addList(v.Args[0].ID, prefs)
|
|
|
|
|
}
|
|
|
|
|
// Save desired registers for this value.
|
|
|
|
|
dinfo[i].out = prefs
|
|
|
|
|
for j, a := range v.Args {
|
|
|
|
|
if j >= len(dinfo[i].in) {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
dinfo[i].in[j] = desired.get(a.ID)
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
2024-11-23 10:58:47 -08:00
|
|
|
if v.Op == OpSelect1 && prefs[0] != noRegister {
|
|
|
|
|
// Save desired registers of select1 for
|
|
|
|
|
// use by the tuple generating instruction.
|
|
|
|
|
desiredSecondReg[v.Args[0].ID] = prefs
|
|
|
|
|
}
|
2016-01-18 20:00:15 -08:00
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// Process all the non-phi values.
|
2016-04-15 12:49:30 -07:00
|
|
|
for idx, v := range oldSched {
|
2023-07-12 15:31:25 -07:00
|
|
|
s.curIdx = nphi + idx
|
2022-04-05 15:07:29 -07:00
|
|
|
tmpReg := noRegister
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf(" processing %s\n", v.LongString())
|
|
|
|
|
}
|
2021-02-13 10:49:37 -05:00
|
|
|
regspec := s.regspec(v)
|
2015-08-11 12:51:33 -07:00
|
|
|
if v.Op == OpPhi {
|
|
|
|
|
f.Fatalf("phi %s not at start of block", v)
|
|
|
|
|
}
|
2024-11-24 15:29:56 -08:00
|
|
|
if opcodeTable[v.Op].fixedReg {
|
|
|
|
|
switch v.Op {
|
|
|
|
|
case OpSP:
|
|
|
|
|
s.assignReg(s.SPReg, v, v)
|
|
|
|
|
s.sp = v.ID
|
|
|
|
|
case OpSB:
|
|
|
|
|
s.assignReg(s.SBReg, v, v)
|
|
|
|
|
s.sb = v.ID
|
|
|
|
|
case OpARM64ZERO:
|
|
|
|
|
s.assignReg(s.ZeroIntReg, v, v)
|
|
|
|
|
default:
|
|
|
|
|
f.Fatalf("unknown fixed-register op %s", v)
|
|
|
|
|
}
|
2015-05-27 14:52:22 -07:00
|
|
|
b.Values = append(b.Values, v)
|
2015-11-05 14:59:47 -08:00
|
|
|
s.advanceUses(v)
|
2015-05-27 14:52:22 -07:00
|
|
|
continue
|
|
|
|
|
}
|
2021-02-04 16:42:35 -05:00
|
|
|
if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN {
|
2016-07-13 16:15:54 -07:00
|
|
|
if s.values[v.ID].needReg {
|
2021-02-04 16:42:35 -05:00
|
|
|
if v.Op == OpSelectN {
|
|
|
|
|
s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v)
|
|
|
|
|
} else {
|
|
|
|
|
var i = 0
|
|
|
|
|
if v.Op == OpSelect1 {
|
|
|
|
|
i = 1
|
|
|
|
|
}
|
|
|
|
|
s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
|
2016-07-13 16:15:54 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
b.Values = append(b.Values, v)
|
|
|
|
|
s.advanceUses(v)
|
2022-11-02 02:07:12 +00:00
|
|
|
continue
|
2016-07-13 16:15:54 -07:00
|
|
|
}
|
2016-05-31 14:01:34 -04:00
|
|
|
if v.Op == OpGetG && s.f.Config.hasGReg {
|
|
|
|
|
// use hardware g register
|
|
|
|
|
if s.regs[s.GReg].v != nil {
|
|
|
|
|
s.freeReg(s.GReg) // kick out the old value
|
|
|
|
|
}
|
|
|
|
|
s.assignReg(s.GReg, v, v)
|
|
|
|
|
b.Values = append(b.Values, v)
|
|
|
|
|
s.advanceUses(v)
|
2022-11-02 02:07:12 +00:00
|
|
|
continue
|
2016-05-31 14:01:34 -04:00
|
|
|
}
|
2015-11-02 08:10:26 -08:00
|
|
|
if v.Op == OpArg {
|
2016-03-01 23:21:55 +00:00
|
|
|
// Args are "pre-spilled" values. We don't allocate
|
|
|
|
|
// any register here. We just set up the spill pointer to
|
2015-11-02 08:10:26 -08:00
|
|
|
// point at itself and any later user will restore it to use it.
|
|
|
|
|
s.values[v.ID].spill = v
|
|
|
|
|
b.Values = append(b.Values, v)
|
2015-11-05 14:59:47 -08:00
|
|
|
s.advanceUses(v)
|
2015-11-02 08:10:26 -08:00
|
|
|
continue
|
|
|
|
|
}
|
2016-04-21 19:28:28 -07:00
|
|
|
if v.Op == OpKeepAlive {
|
|
|
|
|
// Make sure the argument to v is still live here.
|
|
|
|
|
s.advanceUses(v)
|
2017-10-28 10:14:08 -07:00
|
|
|
a := v.Args[0]
|
|
|
|
|
vi := &s.values[a.ID]
|
|
|
|
|
if vi.regs == 0 && !vi.rematerializeable {
|
2016-04-21 19:28:28 -07:00
|
|
|
// Use the spill location.
|
2017-10-28 10:14:08 -07:00
|
|
|
// This forces later liveness analysis to make the
|
|
|
|
|
// value live at this point.
|
|
|
|
|
v.SetArg(0, s.makeSpill(a, b))
|
2020-12-06 18:13:43 -08:00
|
|
|
} else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable {
|
2025-03-26 15:46:30 +01:00
|
|
|
// Rematerializeable value with a *ir.Name. This is the address of
|
2019-02-27 20:43:29 -05:00
|
|
|
// a stack object (e.g. an LEAQ). Keep the object live.
|
|
|
|
|
// Change it to VarLive, which is what plive expects for locals.
|
|
|
|
|
v.Op = OpVarLive
|
|
|
|
|
v.SetArgs1(v.Args[1])
|
|
|
|
|
v.Aux = a.Aux
|
2016-04-21 19:28:28 -07:00
|
|
|
} else {
|
2017-10-28 10:14:08 -07:00
|
|
|
// In-register and rematerializeable values are already live.
|
2016-04-21 19:28:28 -07:00
|
|
|
// These are typically rematerializeable constants like nil,
|
|
|
|
|
// or values of a variable that were modified since the last call.
|
2016-05-18 13:28:48 -07:00
|
|
|
v.Op = OpCopy
|
|
|
|
|
v.SetArgs1(v.Args[1])
|
2016-04-21 19:28:28 -07:00
|
|
|
}
|
2016-05-18 13:28:48 -07:00
|
|
|
b.Values = append(b.Values, v)
|
2016-04-21 19:28:28 -07:00
|
|
|
continue
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
|
|
|
|
|
// No register allocation required (or none specified yet)
|
2021-03-17 19:15:38 -04:00
|
|
|
if s.doClobber && v.Op.IsCall() {
|
|
|
|
|
s.clobberRegs(regspec.clobbers)
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
s.freeRegs(regspec.clobbers)
|
|
|
|
|
b.Values = append(b.Values, v)
|
2016-04-21 19:28:28 -07:00
|
|
|
s.advanceUses(v)
|
2015-08-11 12:51:33 -07:00
|
|
|
continue
|
2015-05-05 16:19:12 -07:00
|
|
|
}
|
|
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
if s.values[v.ID].rematerializeable {
|
2015-10-19 10:57:03 -07:00
|
|
|
// Value is rematerializeable, don't issue it here.
|
|
|
|
|
// It will get issued just before each use (see
|
|
|
|
|
// allocValueToReg).
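// Typical rematerializeable values are small constants and address
// computations (e.g. a MOVD $const or an LEAQ of a global): re-issuing
// the instruction at each use is cheaper than keeping the value live in
// a register or spilling and restoring it.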
|
2016-03-15 20:45:50 -07:00
|
|
|
for _, a := range v.Args {
|
|
|
|
|
a.Uses--
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
s.advanceUses(v)
|
2015-10-19 10:57:03 -07:00
|
|
|
continue
|
|
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("value %s\n", v.LongString())
|
|
|
|
|
fmt.Printf(" out:")
|
|
|
|
|
for _, r := range dinfo[idx].out {
|
|
|
|
|
if r != noRegister {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" %s", &s.registers[r])
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
fmt.Println()
|
|
|
|
|
for i := 0; i < len(v.Args) && i < 3; i++ {
|
|
|
|
|
fmt.Printf(" in%d:", i)
|
|
|
|
|
for _, r := range dinfo[idx].in[i] {
|
|
|
|
|
if r != noRegister {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" %s", &s.registers[r])
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
fmt.Println()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
cmd/compile: reduce redundant register moves for regabi calls
Currently, if we have AX=a and BX=b, and we want to make a call
F(1, a, b), to move arguments into the desired registers it emits
MOVQ AX, CX
MOVL $1, AX // AX=1
MOVQ BX, DX
MOVQ CX, BX // BX=a
MOVQ DX, CX // CX=b
This has a few redundant moves.
This is because we process inputs in order. First, allocate 1 to
AX, which kicks out a (in AX) to CX (a free register at the
moment). Then, allocate a to BX, which kicks out b (in BX) to DX.
Finally, put b to CX.
Notice that if we start with allocating CX=b, then BX=a, AX=1,
we will not have redundant moves. This CL reduces redundant moves
by allocating them in a different order: First, for inputs that are
already in place, keep them there. Then allocate free registers.
Then everything else.
before after
cmd/compile binary size 23703888 23609680
text size 8565899 8533291
(with regabiargs enabled.)
Change-Id: I69e1bdf745f2c90bb791f6d7c45b37384af1e874
Reviewed-on: https://go-review.googlesource.com/c/go/+/311371
Trust: Cherry Zhang <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
2021-04-17 22:50:13 -04:00
|
|
|
// Move arguments to registers.
|
|
|
|
|
// First, if an arg must be in a specific register and it is already
|
|
|
|
|
// in place, keep it.
|
|
|
|
|
args = append(args[:0], make([]*Value, len(v.Args))...)
|
|
|
|
|
for i, a := range v.Args {
|
|
|
|
|
if !s.values[a.ID].needReg {
|
|
|
|
|
args[i] = a
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
for _, i := range regspec.inputs {
|
|
|
|
|
mask := i.regs
|
|
|
|
|
if countRegs(mask) == 1 && mask&s.values[v.Args[i.idx].ID].regs != 0 {
|
|
|
|
|
args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Then, if an arg must be in a specific register and that
|
|
|
|
|
// register is free, allocate that one. Otherwise when processing
|
|
|
|
|
// another input we may kick a value into the free register, which
|
|
|
|
|
// then will be kicked out again.
|
|
|
|
|
// This is a common case for passing-in-register arguments for
|
|
|
|
|
// function calls.
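// For example (illustrative, amd64-style): if one argument must end up in CX
// and CX is currently free, claiming CX now prevents a later input from being
// parked in CX and then immediately evicted again.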
|
|
|
|
|
for {
|
|
|
|
|
freed := false
|
|
|
|
|
for _, i := range regspec.inputs {
|
|
|
|
|
if args[i.idx] != nil {
|
|
|
|
|
continue // already allocated
|
|
|
|
|
}
|
|
|
|
|
mask := i.regs
|
|
|
|
|
if countRegs(mask) == 1 && mask&^s.used != 0 {
|
|
|
|
|
args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
|
|
|
|
|
// If the input is in other registers that will be clobbered by v,
|
|
|
|
|
// or the input is dead, free the registers. This may make room
|
|
|
|
|
// for other inputs.
|
|
|
|
|
oldregs := s.values[v.Args[i.idx].ID].regs
|
|
|
|
|
if oldregs&^regspec.clobbers == 0 || !s.liveAfterCurrentInstruction(v.Args[i.idx]) {
|
|
|
|
|
s.freeRegs(oldregs &^ mask &^ s.nospill)
|
|
|
|
|
freed = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if !freed {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Last, allocate remaining ones, in an ordering defined
|
2015-11-05 14:59:47 -08:00
|
|
|
// by the register specification (most constrained first).
|
2015-08-11 12:51:33 -07:00
|
|
|
for _, i := range regspec.inputs {
|
2021-04-17 22:50:13 -04:00
|
|
|
if args[i.idx] != nil {
|
|
|
|
|
continue // already allocated
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
mask := i.regs
|
2021-04-17 22:50:13 -04:00
|
|
|
if mask&s.values[v.Args[i.idx].ID].regs == 0 {
|
2016-04-15 12:49:30 -07:00
|
|
|
// Need a new register for the input.
|
|
|
|
|
mask &= s.allocatable
|
|
|
|
|
mask &^= s.nospill
|
|
|
|
|
// Use the desired register if available.
|
|
|
|
|
if i.idx < 3 {
|
|
|
|
|
for _, r := range dinfo[idx].in[i.idx] {
|
|
|
|
|
if r != noRegister && (mask&^s.used)>>r&1 != 0 {
|
|
|
|
|
// Desired register is allowed and unused.
|
|
|
|
|
mask = regMask(1) << r
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Avoid registers we're saving for other values.
|
|
|
|
|
if mask&^desired.avoid != 0 {
|
|
|
|
|
mask &^= desired.avoid
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-08-01 07:38:55 -07:00
|
|
|
if mask&s.values[v.Args[i.idx].ID].regs&(1<<s.SPReg) != 0 {
|
|
|
|
|
// Prefer SP register. This ensures that local variables
|
|
|
|
|
// use SP as their base register (instead of a copy of the
|
|
|
|
|
// stack pointer living in another register). See issue 74836.
|
|
|
|
|
mask = 1 << s.SPReg
|
|
|
|
|
}
|
2021-04-17 22:50:13 -04:00
|
|
|
args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// If the output clobbers the input register, make sure we have
|
|
|
|
|
// at least two copies of the input register so we don't
|
|
|
|
|
// have to reload the value from the spill location.
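// For example (illustrative, amd64-style): for v = ADDQ x y the output is
// written to x's register, so if x is needed again after v we want a second
// copy of x in some other register before issuing v.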
|
|
|
|
|
if opcodeTable[v.Op].resultInArg0 {
|
|
|
|
|
var m regMask
|
|
|
|
|
if !s.liveAfterCurrentInstruction(v.Args[0]) {
|
|
|
|
|
// arg0 is dead. We can clobber its register.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2019-10-06 23:03:28 -07:00
|
|
|
if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) {
|
|
|
|
|
args[0], args[1] = args[1], args[0]
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2016-10-05 14:35:47 -07:00
|
|
|
if s.values[v.Args[0].ID].rematerializeable {
|
|
|
|
|
// We can rematerialize the input, don't worry about clobbering it.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2019-10-06 23:03:28 -07:00
|
|
|
if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable {
|
|
|
|
|
args[0], args[1] = args[1], args[0]
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
|
|
|
|
|
// we have at least 2 copies of arg0. We can afford to clobber one.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2019-10-06 23:03:28 -07:00
|
|
|
if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 {
|
|
|
|
|
args[0], args[1] = args[1], args[0]
|
|
|
|
|
goto ok
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
2016-04-10 08:26:43 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// We can't overwrite arg0 (or arg1, if commutative). So we
|
|
|
|
|
// need to make a copy of an input so we have a register we can modify.
|
|
|
|
|
|
|
|
|
|
// Possible new registers to copy into.
|
|
|
|
|
m = s.compatRegs(v.Args[0].Type) &^ s.used
|
|
|
|
|
if m == 0 {
|
|
|
|
|
// No free registers. In this case we'll just clobber
|
|
|
|
|
// an input and future uses of that input must use a restore.
|
|
|
|
|
// TODO(khr): We should really do this like allocReg does it,
|
|
|
|
|
// spilling the value with the most distant next use.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-28 11:00:15 -04:00
|
|
|
// Try to move an input to the desired output, if allowed.
|
2016-04-15 12:49:30 -07:00
|
|
|
for _, r := range dinfo[idx].out {
|
2021-04-28 11:00:15 -04:00
|
|
|
if r != noRegister && (m&regspec.outputs[0].regs)>>r&1 != 0 {
|
2016-04-15 12:49:30 -07:00
|
|
|
m = regMask(1) << r
|
2016-12-07 18:14:35 -08:00
|
|
|
args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos)
|
2016-04-15 12:49:30 -07:00
|
|
|
// Note: we update args[0] so the instruction will
|
|
|
|
|
// use the register copy we just made.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Try to copy input to its desired location & use its old
|
|
|
|
|
// location as the result register.
|
|
|
|
|
for _, r := range dinfo[idx].in[0] {
|
|
|
|
|
if r != noRegister && m>>r&1 != 0 {
|
|
|
|
|
m = regMask(1) << r
|
2016-12-07 18:14:35 -08:00
|
|
|
c := s.allocValToReg(v.Args[0], m, true, v.Pos)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.copies[c] = false
|
2016-04-15 12:49:30 -07:00
|
|
|
// Note: no update to args[0] so the instruction will
|
|
|
|
|
// use the original copy.
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
2016-04-10 08:26:43 -07:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
if opcodeTable[v.Op].commutative {
|
|
|
|
|
for _, r := range dinfo[idx].in[1] {
|
|
|
|
|
if r != noRegister && m>>r&1 != 0 {
|
|
|
|
|
m = regMask(1) << r
|
2016-12-07 18:14:35 -08:00
|
|
|
c := s.allocValToReg(v.Args[1], m, true, v.Pos)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.copies[c] = false
|
2016-04-15 12:49:30 -07:00
|
|
|
args[0], args[1] = args[1], args[0]
|
|
|
|
|
goto ok
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-01-05 13:02:44 -08:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Avoid future fixed uses if we can.
|
|
|
|
|
if m&^desired.avoid != 0 {
|
|
|
|
|
m &^= desired.avoid
|
|
|
|
|
}
|
|
|
|
|
// Save input 0 to a new register so we can clobber it.
|
2016-12-07 18:14:35 -08:00
|
|
|
c := s.allocValToReg(v.Args[0], m, true, v.Pos)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.copies[c] = false
|
2023-01-05 13:02:44 -08:00
|
|
|
|
|
|
|
|
// Normally we use the register of the old copy of input 0 as the target.
|
|
|
|
|
// However, if input 0 is already in its desired register then we use
|
|
|
|
|
// the register of the new copy instead.
|
|
|
|
|
if regspec.outputs[0].regs>>s.f.getHome(c.ID).(*Register).num&1 != 0 {
|
|
|
|
|
if rp, ok := s.f.getHome(args[0].ID).(*Register); ok {
|
|
|
|
|
r := register(rp.num)
|
|
|
|
|
for _, r2 := range dinfo[idx].in[0] {
|
|
|
|
|
if r == r2 {
|
|
|
|
|
args[0] = c
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-10 08:26:43 -07:00
|
|
|
}
|
2016-09-23 09:15:51 -04:00
|
|
|
ok:
|
2025-06-09 16:44:46 -07:00
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
|
if !(i == 0 && regspec.clobbersArg0 || i == 1 && regspec.clobbersArg1) {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if !s.liveAfterCurrentInstruction(v.Args[i]) {
|
|
|
|
|
// arg is dead. We can clobber its register.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if s.values[v.Args[i].ID].rematerializeable {
|
|
|
|
|
// We can rematerialize the input, don't worry about clobbering it.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if countRegs(s.values[v.Args[i].ID].regs) >= 2 {
|
|
|
|
|
// We have at least 2 copies of arg. We can afford to clobber one.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
// Possible new registers to copy into.
|
|
|
|
|
m := s.compatRegs(v.Args[i].Type) &^ s.used
|
|
|
|
|
if m == 0 {
|
|
|
|
|
// No free registers. In this case we'll just clobber the
|
|
|
|
|
// input and future uses of that input must use a restore.
|
|
|
|
|
// TODO(khr): We should really do this like allocReg does it,
|
|
|
|
|
// spilling the value with the most distant next use.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
// Copy input to a new clobberable register.
|
|
|
|
|
c := s.allocValToReg(v.Args[i], m, true, v.Pos)
|
|
|
|
|
s.copies[c] = false
|
|
|
|
|
args[i] = c
|
|
|
|
|
}
|
|
|
|
|
|
2022-04-05 15:07:29 -07:00
|
|
|
// Pick a temporary register if needed.
|
|
|
|
|
// It should be distinct from all the input registers, so we
|
|
|
|
|
// allocate it after all the input registers, but before
|
|
|
|
|
// the input registers are freed via advanceUses below.
|
|
|
|
|
// (Not all instructions need that distinct part, but it is conservative.)
|
2024-06-25 14:56:11 -07:00
|
|
|
// We also ensure it is not any of the single-choice output registers.
|
2022-04-05 15:07:29 -07:00
|
|
|
if opcodeTable[v.Op].needIntTemp {
|
|
|
|
|
m := s.allocatable & s.f.Config.gpRegMask
|
2024-06-25 14:56:11 -07:00
|
|
|
for _, out := range regspec.outputs {
|
|
|
|
|
if countRegs(out.regs) == 1 {
|
|
|
|
|
m &^= out.regs
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-01-17 09:35:21 -08:00
|
|
|
if m&^desired.avoid&^s.nospill != 0 {
|
2022-04-05 15:07:29 -07:00
|
|
|
m &^= desired.avoid
|
|
|
|
|
}
|
|
|
|
|
tmpReg = s.allocReg(m, &tmpVal)
|
|
|
|
|
s.nospill |= regMask(1) << tmpReg
|
2025-02-23 10:34:00 -08:00
|
|
|
s.tmpused |= regMask(1) << tmpReg
|
2022-04-05 15:07:29 -07:00
|
|
|
}
|
|
|
|
|
|
2025-06-09 16:44:46 -07:00
|
|
|
if regspec.clobbersArg0 {
|
|
|
|
|
s.freeReg(register(s.f.getHome(args[0].ID).(*Register).num))
|
|
|
|
|
}
|
|
|
|
|
if regspec.clobbersArg1 {
|
|
|
|
|
s.freeReg(register(s.f.getHome(args[1].ID).(*Register).num))
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// Now that all args are in regs, we're ready to issue the value itself.
|
2015-11-05 14:59:47 -08:00
|
|
|
// Before we pick a register for the output value, allow input registers
|
2015-08-11 12:51:33 -07:00
|
|
|
// to be deallocated. We do this here so that the output can use the
|
|
|
|
|
// same register as a dying input.
|
2016-08-29 16:26:57 -04:00
|
|
|
if !opcodeTable[v.Op].resultNotInArgs {
|
2016-09-23 09:15:51 -04:00
|
|
|
s.tmpused = s.nospill
|
2016-08-29 16:26:57 -04:00
|
|
|
s.nospill = 0
|
|
|
|
|
s.advanceUses(v) // frees any registers holding args that are no longer live
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
|
|
|
|
|
// Dump any registers which will be clobbered
|
2021-03-17 19:15:38 -04:00
|
|
|
if s.doClobber && v.Op.IsCall() {
|
|
|
|
|
// clobber registers that are marked as clobber in regmask, but
|
|
|
|
|
// don't clobber inputs.
|
|
|
|
|
s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill)
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
s.freeRegs(regspec.clobbers)
|
2016-09-23 09:15:51 -04:00
|
|
|
s.tmpused |= regspec.clobbers
|
2015-08-11 12:51:33 -07:00
|
|
|
|
2016-07-13 16:15:54 -07:00
|
|
|
// Pick registers for outputs.
|
|
|
|
|
{
|
2021-02-18 15:50:37 -05:00
|
|
|
outRegs := noRegisters // TODO if this is costly, hoist and clear incrementally below.
|
|
|
|
|
maxOutIdx := -1
|
2016-07-13 16:15:54 -07:00
|
|
|
var used regMask
|
2022-04-05 15:07:29 -07:00
|
|
|
if tmpReg != noRegister {
|
|
|
|
|
// Ensure output registers are distinct from the temporary register.
|
|
|
|
|
// (Not all instructions need that distinct part, but it is conservative.)
|
|
|
|
|
used |= regMask(1) << tmpReg
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
for _, out := range regspec.outputs {
|
2024-06-25 14:56:11 -07:00
|
|
|
if out.regs == 0 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
mask := out.regs & s.allocatable &^ used
|
|
|
|
|
if mask == 0 {
|
2024-06-25 14:56:11 -07:00
|
|
|
s.f.Fatalf("can't find any output register %s", v.LongString())
|
2016-07-13 16:15:54 -07:00
|
|
|
}
|
2016-08-23 16:49:28 -07:00
|
|
|
if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
|
2016-07-13 16:15:54 -07:00
|
|
|
if !opcodeTable[v.Op].commutative {
|
|
|
|
|
// Output must use the same register as input 0.
|
2016-09-16 09:36:00 -07:00
|
|
|
r := register(s.f.getHome(args[0].ID).(*Register).num)
|
2021-04-28 11:00:15 -04:00
|
|
|
if mask>>r&1 == 0 {
|
|
|
|
|
s.f.Fatalf("resultInArg0 value's input %v cannot be an output of %s", s.f.getHome(args[0].ID).(*Register), v.LongString())
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
mask = regMask(1) << r
|
|
|
|
|
} else {
|
|
|
|
|
// Output must use the same register as input 0 or 1.
|
2016-09-16 09:36:00 -07:00
|
|
|
r0 := register(s.f.getHome(args[0].ID).(*Register).num)
|
|
|
|
|
r1 := register(s.f.getHome(args[1].ID).(*Register).num)
|
2016-07-13 16:15:54 -07:00
|
|
|
// Check r0 and r1 for desired output register.
|
|
|
|
|
found := false
|
|
|
|
|
for _, r := range dinfo[idx].out {
|
|
|
|
|
if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
|
|
|
|
|
mask = regMask(1) << r
|
|
|
|
|
found = true
|
|
|
|
|
if r == r1 {
|
|
|
|
|
args[0], args[1] = args[1], args[0]
|
|
|
|
|
}
|
|
|
|
|
break
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
}
|
|
|
|
|
if !found {
|
|
|
|
|
// Neither are desired, pick r0.
|
|
|
|
|
mask = regMask(1) << r0
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
}
|
2022-03-26 10:05:04 -07:00
|
|
|
if out.idx == 0 { // desired registers only apply to the first element of a tuple result
|
|
|
|
|
for _, r := range dinfo[idx].out {
|
|
|
|
|
if r != noRegister && (mask&^s.used)>>r&1 != 0 {
|
|
|
|
|
// Desired register is allowed and unused.
|
|
|
|
|
mask = regMask(1) << r
|
|
|
|
|
break
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
|
|
|
|
}
|
2024-11-23 10:58:47 -08:00
|
|
|
if out.idx == 1 {
|
|
|
|
|
if prefs, ok := desiredSecondReg[v.ID]; ok {
|
|
|
|
|
for _, r := range prefs {
|
|
|
|
|
if r != noRegister && (mask&^s.used)>>r&1 != 0 {
|
|
|
|
|
// Desired register is allowed and unused.
|
|
|
|
|
mask = regMask(1) << r
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
// Avoid registers we're saving for other values.
|
2023-03-28 09:41:46 -07:00
|
|
|
if mask&^desired.avoid&^s.nospill&^s.used != 0 {
|
2016-07-13 16:15:54 -07:00
|
|
|
mask &^= desired.avoid
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
2016-08-02 13:17:09 -07:00
|
|
|
r := s.allocReg(mask, v)
|
2021-02-18 15:50:37 -05:00
|
|
|
if out.idx > maxOutIdx {
|
|
|
|
|
maxOutIdx = out.idx
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
outRegs[out.idx] = r
|
|
|
|
|
used |= regMask(1) << r
|
2016-09-23 09:15:51 -04:00
|
|
|
s.tmpused |= regMask(1) << r
|
2016-04-15 12:49:30 -07:00
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
// Record register choices
|
|
|
|
|
if v.Type.IsTuple() {
|
|
|
|
|
var outLocs LocPair
|
|
|
|
|
if r := outRegs[0]; r != noRegister {
|
|
|
|
|
outLocs[0] = &s.registers[r]
|
|
|
|
|
}
|
|
|
|
|
if r := outRegs[1]; r != noRegister {
|
|
|
|
|
outLocs[1] = &s.registers[r]
|
|
|
|
|
}
|
|
|
|
|
s.f.setHome(v, outLocs)
|
|
|
|
|
// Note that subsequent SelectX instructions will do the assignReg calls.
|
2021-02-17 10:38:03 -05:00
|
|
|
} else if v.Type.IsResults() {
|
2021-02-18 15:50:37 -05:00
|
|
|
// preallocate outLocs to the right size, which is maxOutIdx+1
|
|
|
|
|
outLocs := make(LocResults, maxOutIdx+1, maxOutIdx+1)
|
|
|
|
|
for i := 0; i <= maxOutIdx; i++ {
|
|
|
|
|
if r := outRegs[i]; r != noRegister {
|
|
|
|
|
outLocs[i] = &s.registers[r]
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
s.f.setHome(v, outLocs)
|
2016-07-13 16:15:54 -07:00
|
|
|
} else {
|
|
|
|
|
if r := outRegs[0]; r != noRegister {
|
|
|
|
|
s.assignReg(r, v, v)
|
|
|
|
|
}
|
2016-03-10 13:05:56 -08:00
|
|
|
}
|
2022-04-05 15:07:29 -07:00
|
|
|
if tmpReg != noRegister {
|
|
|
|
|
// Remember the temp register allocation, if any.
|
|
|
|
|
if s.f.tempRegs == nil {
|
|
|
|
|
s.f.tempRegs = map[ID]*Register{}
|
|
|
|
|
}
|
|
|
|
|
s.f.tempRegs[v.ID] = &s.registers[tmpReg]
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-08-29 16:26:57 -04:00
|
|
|
// deallocate dead args, if we have not done so
|
|
|
|
|
if opcodeTable[v.Op].resultNotInArgs {
|
|
|
|
|
s.nospill = 0
|
|
|
|
|
s.advanceUses(v) // frees any registers holding args that are no longer live
|
|
|
|
|
}
|
2016-09-23 09:15:51 -04:00
|
|
|
s.tmpused = 0
|
2016-08-29 16:26:57 -04:00
|
|
|
|
2015-08-11 12:51:33 -07:00
|
|
|
// Issue the Value itself.
|
2015-11-05 14:59:47 -08:00
|
|
|
for i, a := range args {
|
2016-03-15 20:45:50 -07:00
|
|
|
v.SetArg(i, a) // use register version of arguments
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
b.Values = append(b.Values, v)
|
2023-07-12 15:31:25 -07:00
|
|
|
s.dropIfUnused(v)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2019-08-12 20:19:58 +01:00
|
|
|
// Copy the control values - we need this so we can reduce the
|
|
|
|
|
// uses property of these values later.
|
|
|
|
|
controls := append(make([]*Value, 0, 2), b.ControlValues()...)
|
|
|
|
|
|
|
|
|
|
// Load control values into registers.
|
|
|
|
|
for i, v := range b.ControlValues() {
|
|
|
|
|
if !s.values[v.ID].needReg {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf(" processing control %s\n", v.LongString())
|
|
|
|
|
}
|
2016-07-13 16:15:54 -07:00
|
|
|
// We assume that a control input can be passed in any
|
|
|
|
|
// type-compatible register. If this turns out not to be true,
|
|
|
|
|
// we'll need to introduce a regspec for a block's control value.
|
2019-08-12 20:19:58 +01:00
|
|
|
b.ReplaceControl(i, s.allocValToReg(v, s.compatRegs(v.Type), false, b.Pos))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Reduce the uses of the control values once registers have been loaded.
|
|
|
|
|
// This loop is equivalent to the advanceUses method.
|
|
|
|
|
for _, v := range controls {
|
|
|
|
|
vi := &s.values[v.ID]
|
|
|
|
|
if !vi.needReg {
|
|
|
|
|
continue
|
2016-09-23 09:15:51 -04:00
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
// Remove this use from the uses list.
|
2015-12-17 10:01:24 -08:00
|
|
|
u := vi.uses
|
|
|
|
|
vi.uses = u.next
|
|
|
|
|
if u.next == nil {
|
|
|
|
|
s.freeRegs(vi.regs) // value is dead
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
u.next = s.freeUseRecords
|
|
|
|
|
s.freeUseRecords = u
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2016-03-02 15:18:40 -08:00
|
|
|
// If we are approaching a merge point and we are the primary
|
|
|
|
|
// predecessor of it, find live values that we use soon after
|
|
|
|
|
// the merge point and promote them to registers now.
|
2016-03-10 14:42:52 -05:00
|
|
|
if len(b.Succs) == 1 {
|
2018-05-25 16:08:13 -04:00
|
|
|
if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
|
|
|
|
|
s.freeReg(s.GReg) // Spill value in G register before any merge.
|
|
|
|
|
}
|
2025-05-13 17:53:45 -07:00
|
|
|
if s.blockOrder[b.ID] > s.blockOrder[b.Succs[0].b.ID] {
|
|
|
|
|
// No point if we've already regalloc'd the destination.
|
|
|
|
|
goto badloop
|
|
|
|
|
}
|
2016-03-02 15:18:40 -08:00
|
|
|
// For this to be worthwhile, the loop must have no calls in it.
|
2016-04-28 16:52:47 -07:00
|
|
|
top := b.Succs[0].b
|
2016-03-10 14:42:52 -05:00
|
|
|
loop := s.loopnest.b2l[top.ID]
|
2017-12-14 13:27:11 -06:00
|
|
|
if loop == nil || loop.header != top || loop.containsUnavoidableCall {
|
2016-03-10 14:42:52 -05:00
|
|
|
goto badloop
|
2016-03-02 15:18:40 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// TODO: sort by distance, pick the closest ones?
|
|
|
|
|
for _, live := range s.live[b.ID] {
|
|
|
|
|
if live.dist >= unlikelyDistance {
|
|
|
|
|
// Don't preload anything live after the loop.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
vid := live.ID
|
|
|
|
|
vi := &s.values[vid]
|
|
|
|
|
if vi.regs != 0 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-10-05 14:35:47 -07:00
|
|
|
if vi.rematerializeable {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-03-02 15:18:40 -08:00
|
|
|
v := s.orig[vid]
|
|
|
|
|
m := s.compatRegs(v.Type) &^ s.used
|
2020-10-19 03:57:15 +00:00
|
|
|
// Use desired register if available.
|
|
|
|
|
outerloop:
|
|
|
|
|
for _, e := range desired.entries {
|
|
|
|
|
if e.ID != v.ID {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
for _, r := range e.regs {
|
|
|
|
|
if r != noRegister && m>>r&1 != 0 {
|
|
|
|
|
m = regMask(1) << r
|
|
|
|
|
break outerloop
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
if m&^desired.avoid != 0 {
|
|
|
|
|
m &^= desired.avoid
|
|
|
|
|
}
|
2016-03-02 15:18:40 -08:00
|
|
|
if m != 0 {
|
2016-12-07 18:14:35 -08:00
|
|
|
s.allocValToReg(v, m, false, b.Pos)
|
2016-03-02 15:18:40 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
badloop:
|
|
|
|
|
;
|
|
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
// Save end-of-block register state.
|
2016-02-10 17:43:31 -05:00
|
|
|
// First count how many, this cuts allocations in half.
|
|
|
|
|
k := 0
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2016-02-10 17:43:31 -05:00
|
|
|
v := s.regs[r].v
|
|
|
|
|
if v == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
k++
|
|
|
|
|
}
|
|
|
|
|
regList := make([]endReg, 0, k)
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2015-12-17 10:01:24 -08:00
|
|
|
v := s.regs[r].v
|
|
|
|
|
if v == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
regList = append(regList, endReg{r, v, s.regs[r].c})
|
|
|
|
|
}
|
|
|
|
|
s.endRegs[b.ID] = regList
|
|
|
|
|
|
2016-10-07 09:35:04 -07:00
|
|
|
if checkEnabled {
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.clear()
|
2015-12-17 10:01:24 -08:00
|
|
|
for _, x := range s.live[b.ID] {
|
2017-03-22 21:34:12 -04:00
|
|
|
regValLiveSet.add(x.ID)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2016-03-21 22:57:26 -07:00
|
|
|
for r := register(0); r < s.numRegs; r++ {
|
2015-12-17 10:01:24 -08:00
|
|
|
v := s.regs[r].v
|
|
|
|
|
if v == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2017-03-22 21:34:12 -04:00
|
|
|
if !regValLiveSet.contains(v.ID) {
|
2015-12-17 10:01:24 -08:00
|
|
|
s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If a value is live at the end of the block and
|
2017-03-07 14:45:46 -05:00
|
|
|
// isn't in a register, generate a use for the spill location.
|
|
|
|
|
// We need to remember this information so that
|
2016-01-18 20:00:15 -08:00
|
|
|
// the liveness analysis in stackalloc is correct.
|
2015-12-17 10:01:24 -08:00
|
|
|
for _, e := range s.live[b.ID] {
|
2017-03-07 14:45:46 -05:00
|
|
|
vi := &s.values[e.ID]
|
|
|
|
|
if vi.regs != 0 {
|
2015-12-17 10:01:24 -08:00
|
|
|
// in a register, we'll use that source for the merge.
|
|
|
|
|
continue
|
|
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
if vi.rematerializeable {
|
|
|
|
|
// we'll rematerialize during the merge.
|
2015-12-17 10:01:24 -08:00
|
|
|
continue
|
|
|
|
|
}
|
2020-10-19 03:57:15 +00:00
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
|
|
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
spill := s.makeSpill(s.orig[e.ID], b)
|
2015-12-17 10:01:24 -08:00
|
|
|
s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
|
2017-03-07 14:45:46 -05:00
|
|
|
}
|
2016-03-21 11:32:04 -04:00
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
// Clear any final uses.
|
|
|
|
|
// All that is left should be the pseudo-uses added for values which
|
|
|
|
|
// are live at the end of b.
|
|
|
|
|
for _, e := range s.live[b.ID] {
|
|
|
|
|
u := s.values[e.ID].uses
|
|
|
|
|
if u == nil {
|
|
|
|
|
f.Fatalf("live at end, no uses v%d", e.ID)
|
|
|
|
|
}
|
|
|
|
|
if u.next != nil {
|
|
|
|
|
f.Fatalf("live at end, too many uses v%d", e.ID)
|
|
|
|
|
}
|
|
|
|
|
s.values[e.ID].uses = nil
|
|
|
|
|
u.next = s.freeUseRecords
|
|
|
|
|
s.freeUseRecords = u
|
|
|
|
|
}
|
cmd/compile/internal/ssa: drop overwritten regalloc basic block input requirements
For the following description, consider the following basic block graph:
b1 ───┐┌──── b2
││
││
▼▼
b3
For register allocator transitions between basic blocks, there are two
key passes (significant paraphrasing):
First, each basic block is visited in some predetermined visit order.
This is the core visitOrder range loop in regAllocState.regalloc. The
specific ordering heuristics aren't important here, except that the
order guarantees that when visiting a basic block at least one of its
predecessors has already been visited.
Upon visiting a basic block, that block sets its expected starting
register state (regAllocState.startRegs) based on the ending register
state (regAlloc.State.endRegs) of one of its predecessors. (How it
chooses which predecessor to use is not important here.)
From that starting state, registers are assigned for all values in the
block, ultimately resulting in some ending register state.
After all blocks have been visited, the shuffle pass
(regAllocState.shuffle) ensures that for each edge, endRegs of the
predecessor == startRegs of the successor. That is, it makes sure that
the startRegs assumptions actually hold true for each edge. It does this
by adding moves to the end of the predecessor block to place values in
the expected register for the successor block. These may be moves from
other registers, or from memory if the value is spilled.
Now on to the actual problem:
Assume that b1 places some value v1 into register R10, and thus ends
with endRegs containing R10 = v1.
When b3 is visited, it selects b1 as its model predecessor and sets
startRegs with R10 = v1.
b2 does not have v1 in R10, so later in the shuffle pass, we will add a
move of v1 into R10 to the end of b2 to ensure it is available for b3.
This is all perfectly fine and exactly how things should work.
Now suppose that b3 does not use v1. It does need to use some other
value v2, which is not currently in a register. When assigning v2 to a
register, it finds all registers are already in use and it needs to dump
a value. Ultimately, it decides to dump v1 from R10 and replace it with
v2.
This is fine, but it has downstream effects on shuffle in b2. b3's
startRegs still state that R10 = v1, so b2 will add a move to R10 even
though b3 will unconditionally overwrite it. i.e., the move at the end
of b2 is completely useless and can result in code like:
// end of b2
MOV n(SP), R10 // R10 = v1 <-- useless
// start of b3
MOV m(SP), R10 // R10 = v2
This is precisely what happened in #58298.
This CL addresses this problem by dropping registers from startRegs if
they are never used in the basic block prior to getting dumped. This
allows the shuffle pass to avoid placing those useless values into the
register.
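As a rough standalone sketch of that pruning idea (the startReg record and
pruneStartRegs helper are hypothetical names used only here; the real
allocator tracks this with a register mask, as the startRegsMask
synchronization later in this file shows):

	package main

	import "fmt"

	type regMask uint64

	// startReg is a hypothetical stand-in for the per-block record
	// "register r is expected to hold value vid on entry".
	type startReg struct {
		r   uint8
		vid int
	}

	// pruneStartRegs drops entries whose register is clobbered in the
	// block before it is ever read, so predecessor edges need not
	// materialize those values. usedBeforeClobber is the mask of
	// registers that are actually read before being reassigned.
	func pruneStartRegs(in []startReg, usedBeforeClobber regMask) []startReg {
		out := make([]startReg, 0, len(in))
		for _, sr := range in {
			if usedBeforeClobber&(regMask(1)<<sr.r) == 0 {
				continue // overwritten before any use: drop the requirement
			}
			out = append(out, sr)
		}
		return out
	}

	func main() {
		entry := []startReg{{10, 1}, {11, 2}}
		// Only R11's value is read before being overwritten in this
		// block, so the R10 requirement is dropped and the predecessor
		// need not restore v1.
		fmt.Println(pruneStartRegs(entry, 1<<11))
	}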
There is a significant limitation to this CL, which is that it only
impacts the immediate predecessors of an overwriting block. We can
discuss this by zooming out a bit on the previous graph:
b4 ───┐┌──── b5
││
││
▼▼
b1 ───┐┌──── b2
││
││
▼▼
b3
Here we have the same graph, except we can see the two predecessors of
b1.
Now suppose that rather than b1 assigning R10 = v1 as above, the
assignment is done in b4. b1 has startRegs R10 = v1, doesn't use the
value at all, and simply passes it through to endRegs R10 = v1.
Now the shuffle pass will require both b2 and b5 to add a move to
assigned R10 = v1, because that is specified in their successor
startRegs.
With this CL, b3 drops R10 = v1 from startRegs, but there is no
backwards propagation, so b1 still has R10 = v1 in startRegs, and b5
still needs to add a useless move.
Extending this CL with such propagation may significantly increase the
number of useless moves we can remove, though it will add complexity to
maintenance and could potentially impact build performance depending on
how efficiently we could implement the propagation (something I haven't
considered carefully).
As-is, this optimization does not impact much code. In bent .text size
geomean is -0.02%. In the container/heap test binary, 18 of ~2500
functions are impacted by this CL. Bent and sweet do not show a
noticeable performance impact one way or another, however #58298 does
show a case where this can have impact if the useless instructions end
up in the hot path of a tight loop.
For #58298.
Change-Id: I2fcef37c955159d068fa0725f995a1848add8a5f
Reviewed-on: https://go-review.googlesource.com/c/go/+/471158
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
2023-02-21 13:20:49 -05:00
|
|
|
|
|
|
|
|
// allocReg may have dropped registers from startRegsMask that
|
|
|
|
|
// aren't actually needed in startRegs. Synchronize back to
|
|
|
|
|
// startRegs.
|
|
|
|
|
//
|
|
|
|
|
// This must be done before placing spills, which will look at
|
|
|
|
|
// startRegs to decide if a block is a valid block for a spill.
|
|
|
|
|
if c := countRegs(s.startRegsMask); c != len(s.startRegs[b.ID]) {
|
|
|
|
|
regs := make([]startReg, 0, c)
|
|
|
|
|
for _, sr := range s.startRegs[b.ID] {
|
2023-03-20 13:01:14 -07:00
|
|
|
if s.startRegsMask&(regMask(1)<<sr.r) == 0 {
|
2023-02-21 13:20:49 -05:00
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
regs = append(regs, sr)
|
|
|
|
|
}
|
|
|
|
|
s.startRegs[b.ID] = regs
|
|
|
|
|
}
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
|
2017-03-07 14:45:46 -05:00
|
|
|
// Decide where the spills we generated will go.
|
|
|
|
|
s.placeSpills()
|
2016-03-21 11:32:04 -04:00
|
|
|
|
2015-12-17 10:01:24 -08:00
|
|
|
// Anything that didn't get a register gets a stack location here.
|
|
|
|
|
// (StoreReg, stack-based phis, inputs, ...)
|
|
|
|
|
stacklive := stackalloc(s.f, s.spillLive)
|
|
|
|
|
|
|
|
|
|
// Fix up all merge edges.
|
|
|
|
|
s.shuffle(stacklive)
|
2016-03-21 11:32:04 -04:00
|
|
|
|
2016-09-29 15:08:37 -07:00
|
|
|
// Erase any copies we never used.
|
|
|
|
|
// Also, an unused copy might be the only use of another copy,
|
|
|
|
|
// so continue erasing until we reach a fixed point.
|
|
|
|
|
for {
|
|
|
|
|
progress := false
|
|
|
|
|
for c, used := range s.copies {
|
|
|
|
|
if !used && c.Uses == 0 {
|
|
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("delete copied value %s\n", c.LongString())
|
|
|
|
|
}
|
2021-10-25 01:02:12 +07:00
|
|
|
c.resetArgs()
|
2016-09-29 15:08:37 -07:00
|
|
|
f.freeValue(c)
|
|
|
|
|
delete(s.copies, c)
|
|
|
|
|
progress = true
|
2016-09-23 09:15:51 -04:00
|
|
|
}
|
2016-09-29 15:08:37 -07:00
|
|
|
}
|
|
|
|
|
if !progress {
|
|
|
|
|
break
|
2016-09-23 09:15:51 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-30 16:20:10 -04:00
|
|
|
for _, b := range s.visitOrder {
|
2016-09-23 09:15:51 -04:00
|
|
|
i := 0
|
|
|
|
|
for _, v := range b.Values {
|
|
|
|
|
if v.Op == OpInvalid {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
b.Values[i] = v
|
|
|
|
|
i++
|
|
|
|
|
}
|
|
|
|
|
b.Values = b.Values[:i]
|
|
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (s *regAllocState) placeSpills() {
|
2021-05-25 10:13:07 -04:00
|
|
|
mustBeFirst := func(op Op) bool {
|
|
|
|
|
return op.isLoweredGetClosurePtr() || op == OpPhi || op == OpArgIntReg || op == OpArgFloatReg
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-07 14:45:46 -05:00
|
|
|
// Start maps block IDs to the list of spills
|
|
|
|
|
// that go at the start of the block (but after any phis).
|
|
|
|
|
start := map[ID][]*Value{}
|
|
|
|
|
// After maps value IDs to the list of spills
|
|
|
|
|
// that go immediately after that value ID.
|
|
|
|
|
after := map[ID][]*Value{}
|
|
|
|
|
|
|
|
|
|
for i := range s.values {
|
|
|
|
|
vi := s.values[i]
|
|
|
|
|
spill := vi.spill
|
|
|
|
|
if spill == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if spill.Block != nil {
|
|
|
|
|
// Some spills are already fully set up,
|
|
|
|
|
// like OpArgs and stack-based phis.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
v := s.orig[i]
|
|
|
|
|
|
|
|
|
|
// Walk down the dominator tree looking for a good place to
|
|
|
|
|
// put the spill of v. At the start "best" is the best place
|
|
|
|
|
// we have found so far.
|
|
|
|
|
// TODO: find a way to make this O(1) without arbitrary cutoffs.
|
2021-02-04 16:42:35 -05:00
|
|
|
if v == nil {
|
|
|
|
|
panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString()))
|
|
|
|
|
}
|
2017-03-07 14:45:46 -05:00
|
|
|
best := v.Block
|
|
|
|
|
bestArg := v
|
|
|
|
|
var bestDepth int16
|
|
|
|
|
if l := s.loopnest.b2l[best.ID]; l != nil {
|
|
|
|
|
bestDepth = l.depth
|
|
|
|
|
}
|
|
|
|
|
b := best
|
|
|
|
|
const maxSpillSearch = 100
|
|
|
|
|
for i := 0; i < maxSpillSearch; i++ {
|
|
|
|
|
// Find the child of b in the dominator tree which
|
|
|
|
|
// dominates all restores.
|
|
|
|
|
p := b
|
|
|
|
|
b = nil
|
|
|
|
|
for c := s.sdom.Child(p); c != nil && i < maxSpillSearch; c, i = s.sdom.Sibling(c), i+1 {
|
|
|
|
|
if s.sdom[c.ID].entry <= vi.restoreMin && s.sdom[c.ID].exit >= vi.restoreMax {
|
|
|
|
|
// c also dominates all restores. Walk down into c.
|
|
|
|
|
b = c
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if b == nil {
|
|
|
|
|
// Ran out of blocks which dominate all restores.
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var depth int16
|
|
|
|
|
if l := s.loopnest.b2l[b.ID]; l != nil {
|
|
|
|
|
depth = l.depth
|
|
|
|
|
}
|
|
|
|
|
if depth > bestDepth {
|
|
|
|
|
// Don't push the spill into a deeper loop.
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-03-21 11:32:04 -04:00
|
|
|
|
2017-03-07 14:45:46 -05:00
|
|
|
// If v is in a register at the start of b, we can
|
|
|
|
|
// place the spill here (after the phis).
|
|
|
|
|
if len(b.Preds) == 1 {
|
|
|
|
|
for _, e := range s.endRegs[b.Preds[0].b.ID] {
|
|
|
|
|
if e.v == v {
|
|
|
|
|
// Found a better spot for the spill.
|
|
|
|
|
best = b
|
|
|
|
|
bestArg = e.c
|
|
|
|
|
bestDepth = depth
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for _, e := range s.startRegs[b.ID] {
|
|
|
|
|
if e.v == v {
|
|
|
|
|
// Found a better spot for the spill.
|
|
|
|
|
best = b
|
|
|
|
|
bestArg = e.c
|
|
|
|
|
bestDepth = depth
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Put the spill in the best block we found.
|
|
|
|
|
spill.Block = best
|
|
|
|
|
spill.AddArg(bestArg)
|
2021-05-25 10:13:07 -04:00
|
|
|
if best == v.Block && !mustBeFirst(v.Op) {
|
2017-03-07 14:45:46 -05:00
|
|
|
// Place immediately after v.
|
|
|
|
|
after[v.ID] = append(after[v.ID], spill)
|
|
|
|
|
} else {
|
|
|
|
|
// Place at the start of best block.
|
|
|
|
|
start[best.ID] = append(start[best.ID], spill)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Insert spill instructions into the block schedules.
|
|
|
|
|
var oldSched []*Value
|
2017-06-30 16:20:10 -04:00
|
|
|
for _, b := range s.visitOrder {
|
2021-05-25 10:13:07 -04:00
|
|
|
nfirst := 0
|
2017-03-07 14:45:46 -05:00
|
|
|
for _, v := range b.Values {
|
2021-05-25 10:13:07 -04:00
|
|
|
if !mustBeFirst(v.Op) {
|
2017-03-07 14:45:46 -05:00
|
|
|
break
|
|
|
|
|
}
|
2021-05-25 10:13:07 -04:00
|
|
|
nfirst++
|
2017-03-07 14:45:46 -05:00
|
|
|
}
|
2021-05-25 10:13:07 -04:00
|
|
|
oldSched = append(oldSched[:0], b.Values[nfirst:]...)
|
|
|
|
|
b.Values = b.Values[:nfirst]
|
2017-09-28 20:17:59 +01:00
|
|
|
b.Values = append(b.Values, start[b.ID]...)
|
2017-03-07 14:45:46 -05:00
|
|
|
for _, v := range oldSched {
|
|
|
|
|
b.Values = append(b.Values, v)
|
2017-09-28 20:17:59 +01:00
|
|
|
b.Values = append(b.Values, after[v.ID]...)
|
2017-03-07 14:45:46 -05:00
|
|
|
}
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
|
|
|
|
|
func (s *regAllocState) shuffle(stacklive [][]ID) {
|
|
|
|
|
var e edgeState
|
|
|
|
|
e.s = s
|
|
|
|
|
e.cache = map[ID][]*Value{}
|
|
|
|
|
e.contents = map[Location]contentRecord{}
|
2016-03-10 17:52:57 -06:00
|
|
|
if s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("shuffle %s\n", s.f.Name)
|
|
|
|
|
fmt.Println(s.f.String())
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-30 16:20:10 -04:00
|
|
|
for _, b := range s.visitOrder {
|
2015-12-17 10:01:24 -08:00
|
|
|
if len(b.Preds) <= 1 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
e.b = b
|
2016-04-28 16:52:47 -07:00
|
|
|
for i, edge := range b.Preds {
|
|
|
|
|
p := edge.b
|
2015-12-17 10:01:24 -08:00
|
|
|
e.p = p
|
|
|
|
|
e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
|
|
|
|
|
e.process()
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-04-14 15:46:26 +01:00
|
|
|
|
|
|
|
|
if s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf("post shuffle %s\n", s.f.Name)
|
|
|
|
|
fmt.Println(s.f.String())
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type edgeState struct {
|
|
|
|
|
s *regAllocState
|
|
|
|
|
p, b *Block // edge goes from p->b.
|
|
|
|
|
|
|
|
|
|
// for each pre-regalloc value, a list of equivalent cached values
|
2016-03-03 09:53:03 -08:00
|
|
|
cache map[ID][]*Value
|
|
|
|
|
cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
|
2015-12-17 10:01:24 -08:00
|
|
|
|
|
|
|
|
// map from location to the value it contains
|
|
|
|
|
contents map[Location]contentRecord
|
|
|
|
|
|
|
|
|
|
// desired destination locations
|
|
|
|
|
destinations []dstRecord
|
|
|
|
|
extra []dstRecord
|
|
|
|
|
|
2018-03-16 07:15:59 -07:00
|
|
|
usedRegs regMask // registers currently holding something
|
|
|
|
|
uniqueRegs regMask // registers holding the only copy of a value
|
|
|
|
|
finalRegs regMask // registers holding final target
|
|
|
|
|
rematerializeableRegs regMask // registers that hold rematerializeable values
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type contentRecord struct {
|
2016-12-15 17:17:01 -08:00
|
|
|
vid ID // pre-regalloc value
|
|
|
|
|
c *Value // cached value
|
|
|
|
|
final bool // this is a satisfied destination
|
|
|
|
|
pos src.XPos // source position of use of the value
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type dstRecord struct {
|
|
|
|
|
loc Location // register or stack slot
|
|
|
|
|
vid ID // pre-regalloc value it should contain
|
|
|
|
|
splice **Value // place to store reference to the generating instruction
|
2016-12-15 17:17:01 -08:00
|
|
|
pos src.XPos // source position of use of this location
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// setup initializes the edge state for shuffling.
|
|
|
|
|
func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("edge %s->%s\n", e.p, e.b)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Clear state.
|
2024-09-03 13:53:37 -07:00
|
|
|
clear(e.cache)
|
2016-03-03 09:53:03 -08:00
|
|
|
e.cachedVals = e.cachedVals[:0]
|
2024-09-03 13:53:37 -07:00
|
|
|
clear(e.contents)
|
2016-01-04 13:34:54 -08:00
|
|
|
e.usedRegs = 0
|
|
|
|
|
e.uniqueRegs = 0
|
|
|
|
|
e.finalRegs = 0
|
2018-03-16 07:15:59 -07:00
|
|
|
e.rematerializeableRegs = 0
|
2015-12-17 10:01:24 -08:00
|
|
|
|
|
|
|
|
// Live registers can be sources.
|
|
|
|
|
for _, x := range srcReg {
|
2016-12-15 17:17:01 -08:00
|
|
|
e.set(&e.s.registers[x.r], x.v.ID, x.c, false, src.NoXPos) // don't care the position of the source
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
// So can all of the spill locations.
|
|
|
|
|
for _, spillID := range stacklive {
|
|
|
|
|
v := e.s.orig[spillID]
|
|
|
|
|
spill := e.s.values[v.ID].spill
|
2019-11-01 14:04:08 -07:00
|
|
|
if !e.s.sdom.IsAncestorEq(spill.Block, e.p) {
|
2017-03-07 14:45:46 -05:00
|
|
|
// Spills were placed that only dominate the uses found
|
|
|
|
|
// during the first regalloc pass. The edge fixup code
|
|
|
|
|
// can't use a spill location if the spill doesn't dominate
|
|
|
|
|
// the edge.
|
|
|
|
|
// We are guaranteed that if the spill doesn't dominate this edge,
|
|
|
|
|
// then the value is available in a register (because we called
|
|
|
|
|
// makeSpill for every value not in a register at the start
|
|
|
|
|
// of an edge).
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-12-15 17:17:01 -08:00
|
|
|
e.set(e.s.f.getHome(spillID), v.ID, spill, false, src.NoXPos) // don't care the position of the source
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Figure out all the destinations we need.
|
|
|
|
|
dsts := e.destinations[:0]
|
|
|
|
|
for _, x := range dstReg {
|
2017-03-07 14:45:46 -05:00
|
|
|
dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.v.ID, nil, x.pos})
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
// Phis need their args to end up in a specific location.
|
|
|
|
|
for _, v := range e.b.Values {
|
|
|
|
|
if v.Op != OpPhi {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
loc := e.s.f.getHome(v.ID)
|
|
|
|
|
if loc == nil {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-12-07 18:14:35 -08:00
|
|
|
dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Pos})
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
e.destinations = dsts
|
|
|
|
|
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2016-03-03 09:53:03 -08:00
|
|
|
for _, vid := range e.cachedVals {
|
|
|
|
|
a := e.cache[vid]
|
2015-12-17 10:01:24 -08:00
|
|
|
for _, c := range a {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
for _, d := range e.destinations {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("dst %s: v%d\n", d.loc, d.vid)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// process generates code to move all the values to the right destination locations.
|
|
|
|
|
func (e *edgeState) process() {
|
|
|
|
|
dsts := e.destinations
|
|
|
|
|
|
|
|
|
|
// Process the destinations until they are all satisfied.
|
|
|
|
|
for len(dsts) > 0 {
|
|
|
|
|
i := 0
|
|
|
|
|
for _, d := range dsts {
|
2016-12-08 13:49:51 -08:00
|
|
|
if !e.processDest(d.loc, d.vid, d.splice, d.pos) {
|
2015-12-17 10:01:24 -08:00
|
|
|
// Failed - save for next iteration.
|
|
|
|
|
dsts[i] = d
|
|
|
|
|
i++
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if i < len(dsts) {
|
2016-03-01 23:21:55 +00:00
|
|
|
// Made some progress. Go around again.
|
2015-12-17 10:01:24 -08:00
|
|
|
dsts = dsts[:i]
|
|
|
|
|
|
|
|
|
|
// Append any extra destinations we generated.
|
|
|
|
|
dsts = append(dsts, e.extra...)
|
|
|
|
|
e.extra = e.extra[:0]
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// We made no progress. That means that any
|
2015-12-17 10:01:24 -08:00
|
|
|
// remaining unsatisfied moves are in simple cycles.
|
|
|
|
|
// For example, A -> B -> C -> D -> A.
|
|
|
|
|
// A ----> B
|
|
|
|
|
// ^ |
|
|
|
|
|
// | |
|
|
|
|
|
// | v
|
|
|
|
|
// D <---- C
|
|
|
|
|
|
|
|
|
|
// To break the cycle, we pick an unused register, say R,
|
|
|
|
|
// and put a copy of B there.
|
|
|
|
|
// A ----> B
|
|
|
|
|
// ^ |
|
|
|
|
|
// | |
|
|
|
|
|
// | v
|
|
|
|
|
// D <---- C <---- R=copyofB
|
|
|
|
|
// When we resume the outer loop, the A->B move can now proceed,
|
|
|
|
|
// and eventually the whole cycle completes.
|
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// Copy any cycle location to a temp register. This duplicates
|
2015-12-17 10:01:24 -08:00
|
|
|
// one of the cycle entries, allowing the just duplicated value
|
|
|
|
|
// to be overwritten and the cycle to proceed.
|
2016-07-20 10:09:40 -04:00
|
|
|
d := dsts[0]
|
|
|
|
|
loc := d.loc
|
2015-12-17 10:01:24 -08:00
|
|
|
vid := e.contents[loc].vid
|
|
|
|
|
c := e.contents[loc].c
|
|
|
|
|
r := e.findRegFor(c.Type)
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
[dev.debug] cmd/compile: better DWARF with optimizations on
Debuggers use DWARF information to find local variables on the
stack and in registers. Prior to this CL, the DWARF information for
functions claimed that all variables were on the stack at all times.
That's incorrect when optimizations are enabled, and results in
debuggers showing data that is out of date or complete gibberish.
After this CL, the compiler is capable of representing variable
locations more accurately, and attempts to do so. Due to limitations of
the SSA backend, it's not possible to be completely correct.
There are a number of problems in the current design. One of the easier
to understand is that variable names currently must be attached to an
SSA value, but not all assignments in the source code actually result
in machine code. For example:
type myint int
var a int
b := myint(a)
and
b := (*uint64)(unsafe.Pointer(a))
don't generate machine code because the underlying representation is the
same, so the correct value of b will not be set when the user would
expect.
Generating the more precise debug information is behind a flag,
dwarflocationlists. Because of the issues described above, setting the
flag may not make the debugging experience much better, and may actually
make it worse in cases where the variable actually is on the stack and
the more complicated analysis doesn't realize it.
A number of changes are included:
- Add a new pseudo-instruction, RegKill, which indicates that the value
in the register has been clobbered.
- Adjust regalloc to emit RegKills in the right places. Significantly,
this means that phis are mixed with StoreReg and RegKills after
regalloc.
- Track variable decomposition in ssa.LocalSlots.
- After the SSA backend is done, analyze the result and build location
lists for each LocalSlot.
- After assembly is done, update the location lists with the assembled
PC offsets, recompose variables, and build DWARF location lists. Emit the
list as a new linker symbol, one per function.
- In the linker, aggregate the location lists into a .debug_loc section.
TODO:
- currently disabled for non-X86/AMD64 because there are no data tables.
go build -toolexec 'toolstash -cmp' -a std succeeds.
With -dwarflocationlists false:
before: f02812195637909ff675782c0b46836a8ff01976
after: 06f61e8112a42ac34fb80e0c818b3cdb84a5e7ec
benchstat -geomean /tmp/220352263 /tmp/621364410
completed 15 of 15, estimated time remaining 0s (eta 3:52PM)
name old time/op new time/op delta
Template 199ms ± 3% 198ms ± 2% ~ (p=0.400 n=15+14)
Unicode 96.6ms ± 5% 96.4ms ± 5% ~ (p=0.838 n=15+15)
GoTypes 653ms ± 2% 647ms ± 2% ~ (p=0.102 n=15+14)
Flate 133ms ± 6% 129ms ± 3% -2.62% (p=0.041 n=15+15)
GoParser 164ms ± 5% 159ms ± 3% -3.05% (p=0.000 n=15+15)
Reflect 428ms ± 4% 422ms ± 3% ~ (p=0.156 n=15+13)
Tar 123ms ±10% 124ms ± 8% ~ (p=0.461 n=15+15)
XML 228ms ± 3% 224ms ± 3% -1.57% (p=0.045 n=15+15)
[Geo mean] 206ms 377ms +82.86%
name old user-time/op new user-time/op delta
Template 292ms ±10% 301ms ±12% ~ (p=0.189 n=15+15)
Unicode 166ms ±37% 158ms ±14% ~ (p=0.418 n=15+14)
GoTypes 962ms ± 6% 963ms ± 7% ~ (p=0.976 n=15+15)
Flate 207ms ±19% 200ms ±14% ~ (p=0.345 n=14+15)
GoParser 246ms ±22% 240ms ±15% ~ (p=0.587 n=15+15)
Reflect 611ms ±13% 587ms ±14% ~ (p=0.085 n=15+13)
Tar 211ms ±12% 217ms ±14% ~ (p=0.355 n=14+15)
XML 335ms ±15% 320ms ±18% ~ (p=0.169 n=15+15)
[Geo mean] 317ms 583ms +83.72%
name old alloc/op new alloc/op delta
Template 40.2MB ± 0% 40.2MB ± 0% -0.15% (p=0.000 n=14+15)
Unicode 29.2MB ± 0% 29.3MB ± 0% ~ (p=0.624 n=15+15)
GoTypes 114MB ± 0% 114MB ± 0% -0.15% (p=0.000 n=15+14)
Flate 25.7MB ± 0% 25.6MB ± 0% -0.18% (p=0.000 n=13+15)
GoParser 32.2MB ± 0% 32.2MB ± 0% -0.14% (p=0.003 n=15+15)
Reflect 77.8MB ± 0% 77.9MB ± 0% ~ (p=0.061 n=15+15)
Tar 27.1MB ± 0% 27.0MB ± 0% -0.11% (p=0.029 n=15+15)
XML 42.7MB ± 0% 42.5MB ± 0% -0.29% (p=0.000 n=15+15)
[Geo mean] 42.1MB 75.0MB +78.05%
name old allocs/op new allocs/op delta
Template 402k ± 1% 398k ± 0% -0.91% (p=0.000 n=15+15)
Unicode 344k ± 1% 344k ± 0% ~ (p=0.715 n=15+14)
GoTypes 1.18M ± 0% 1.17M ± 0% -0.91% (p=0.000 n=15+14)
Flate 243k ± 0% 240k ± 1% -1.05% (p=0.000 n=13+15)
GoParser 327k ± 1% 324k ± 1% -0.96% (p=0.000 n=15+15)
Reflect 984k ± 1% 982k ± 0% ~ (p=0.050 n=15+15)
Tar 261k ± 1% 259k ± 1% -0.77% (p=0.000 n=15+15)
XML 411k ± 0% 404k ± 1% -1.55% (p=0.000 n=15+15)
[Geo mean] 439k 755k +72.01%
name old text-bytes new text-bytes delta
HelloSize 694kB ± 0% 694kB ± 0% -0.00% (p=0.000 n=15+15)
name old data-bytes new data-bytes delta
HelloSize 5.55kB ± 0% 5.55kB ± 0% ~ (all equal)
name old bss-bytes new bss-bytes delta
HelloSize 133kB ± 0% 133kB ± 0% ~ (all equal)
name old exe-bytes new exe-bytes delta
HelloSize 1.04MB ± 0% 1.04MB ± 0% ~ (all equal)
Change-Id: I991fc553ef175db46bb23b2128317bbd48de70d8
Reviewed-on: https://go-review.googlesource.com/41770
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
2017-07-21 18:30:19 -04:00
|
|
|
e.erase(r)
|
2018-03-02 20:33:15 -05:00
|
|
|
pos := d.pos.WithNotStmt()
|
2015-12-17 10:01:24 -08:00
|
|
|
if _, isReg := loc.(*Register); isReg {
|
2018-03-02 20:33:15 -05:00
|
|
|
c = e.p.NewValue1(pos, OpCopy, c.Type, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
} else {
|
2018-03-02 20:33:15 -05:00
|
|
|
c = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2018-03-02 20:33:15 -05:00
|
|
|
e.set(r, vid, c, false, pos)
|
2018-05-25 16:08:13 -04:00
|
|
|
if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) {
|
|
|
|
|
e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString())
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// processDest generates code to put value vid into location loc. Returns true
|
2015-12-17 10:01:24 -08:00
|
|
|
// if progress was made.
|
2016-12-15 17:17:01 -08:00
|
|
|
func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XPos) bool {
|
2018-03-02 20:33:15 -05:00
|
|
|
pos = pos.WithNotStmt()
|
2015-12-17 10:01:24 -08:00
|
|
|
occupant := e.contents[loc]
|
|
|
|
|
if occupant.vid == vid {
|
|
|
|
|
// Value is already in the correct place.
|
2016-12-08 13:49:51 -08:00
|
|
|
e.contents[loc] = contentRecord{vid, occupant.c, true, pos}
|
2015-12-17 10:01:24 -08:00
|
|
|
if splice != nil {
|
2016-03-15 20:45:50 -07:00
|
|
|
(*splice).Uses--
|
2015-12-17 10:01:24 -08:00
|
|
|
*splice = occupant.c
|
2016-03-15 20:45:50 -07:00
|
|
|
occupant.c.Uses++
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2016-03-01 23:21:55 +00:00
|
|
|
// Note: if splice==nil then c will appear dead. This is
|
2015-12-17 10:01:24 -08:00
|
|
|
// non-SSA formed code, so be careful after this pass not to run
|
|
|
|
|
// deadcode elimination.
|
2016-09-23 09:15:51 -04:00
|
|
|
if _, ok := e.s.copies[occupant.c]; ok {
|
|
|
|
|
// The copy at occupant.c was used to avoid spill.
|
|
|
|
|
e.s.copies[occupant.c] = true
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check if we're allowed to clobber the destination location.
|
2025-08-13 14:01:30 -07:00
|
|
|
if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable && !opcodeTable[e.s.orig[occupant.vid].Op].fixedReg {
|
2015-12-17 10:01:24 -08:00
|
|
|
// We can't overwrite the last copy
|
|
|
|
|
// of a value that needs to survive.
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Copy from a source of v, register preferred.
|
|
|
|
|
v := e.s.orig[vid]
|
|
|
|
|
var c *Value
|
|
|
|
|
var src Location
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("moving v%d to %s\n", vid, loc)
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("sources of v%d:", vid)
|
|
|
|
|
}
|
2024-11-24 15:29:56 -08:00
|
|
|
if opcodeTable[v.Op].fixedReg {
|
|
|
|
|
c = v
|
|
|
|
|
src = e.s.f.getHome(v.ID)
|
|
|
|
|
} else {
|
|
|
|
|
for _, w := range e.cache[vid] {
|
|
|
|
|
h := e.s.f.getHome(w.ID)
|
|
|
|
|
if e.s.f.pass.debug > regDebug {
|
|
|
|
|
fmt.Printf(" %s:%s", h, w)
|
|
|
|
|
}
|
|
|
|
|
_, isreg := h.(*Register)
|
|
|
|
|
if src == nil || isreg {
|
|
|
|
|
c = w
|
|
|
|
|
src = h
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
if src != nil {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" [use %s]\n", src)
|
2015-12-17 10:01:24 -08:00
|
|
|
} else {
|
|
|
|
|
fmt.Printf(" [no source]\n")
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
_, dstReg := loc.(*Register)
|
2017-07-21 18:30:19 -04:00
|
|
|
|
|
|
|
|
// Pre-clobber destination. This avoids the
|
|
|
|
|
// following situation:
|
|
|
|
|
// - v is currently held in R0 and stacktmp0.
|
|
|
|
|
// - We want to copy stacktmp1 to stacktmp0.
|
|
|
|
|
// - We choose R0 as the temporary register.
|
|
|
|
|
// During the copy, both R0 and stacktmp0 are
|
|
|
|
|
// clobbered, losing both copies of v. Oops!
|
|
|
|
|
// Erasing the destination early means R0 will not
|
|
|
|
|
// be chosen as the temp register, as it will then
|
|
|
|
|
// be the last copy of v.
|
|
|
|
|
e.erase(loc)
|
2015-12-17 10:01:24 -08:00
|
|
|
var x *Value
|
2018-03-15 22:40:23 -07:00
|
|
|
if c == nil || e.s.values[vid].rematerializeable {
|
2015-12-17 10:01:24 -08:00
|
|
|
if !e.s.values[vid].rematerializeable {
|
2016-09-16 15:02:47 -07:00
|
|
|
e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
if dstReg {
|
cmd/compile: assign and preserve statement boundaries.
A new pass run after ssa building (before any other
optimization) identifies the "first" ssa node for each
statement. Other "noise" nodes are tagged as being never
appropriate for a statement boundary (e.g., VarKill, VarDef,
Phi).
Rewrite, deadcode, cse, and nilcheck are modified to move
the statement boundaries forward whenever possible if a
boundary-tagged ssa value is removed; never-boundary nodes
are ignored in this search (some operations involving
constants are also tagged as never-boundary and also ignored
because they are likely to be moved or removed during
optimization).
Code generation treats all nodes except those explicitly
marked as statement boundaries as "not statement" nodes,
and floats statement boundaries to the beginning of each
same-line run of instructions found within a basic block.
Line number html conversion was modified to make statement
boundary nodes a bit more obvious by prepending a "+".
The code in fuse.go that glued together the value slices
of two blocks produced a result that depended on the
former capacities (not lengths) of the two slices. This
causes differences in the 386 bootstrap, and also can
sometimes put values into an order that does a worse job
of preserving statement boundaries when values are removed.
Portions of two delve tests that had caught problems were
incorporated into ssa/debug_test.go. There are some
opportunities to do better with optimized code, but the
next-ing is not lying or overly jumpy.
Over 4 CLs, compilebench geomean measured binary size
increase of 3.5% and compile user time increase of 3.8%
(this is after optimization to reuse a sparse map instead
of creating multiple maps.)
This CL worsens the optimized-debugging experience with
Delve; we need to work with the delve team so that
they can use the is_stmt marks that we're emitting now.
The reference output changes from time to time depending
on other changes in the compiler, sometimes better,
sometimes worse.
This CL now includes a test ensuring that 99+% of the lines
in the Go command itself (a handy optimized binary) include
is_stmt markers.
Change-Id: I359c94e06843f1eb41f9da437bd614885aa9644a
Reviewed-on: https://go-review.googlesource.com/102435
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
2018-03-23 22:46:06 -04:00
|
|
|
x = v.copyInto(e.p)
|
2015-12-17 10:01:24 -08:00
|
|
|
} else {
|
2016-03-01 23:21:55 +00:00
|
|
|
// Rematerialize into stack slot. Need a free
|
2015-12-17 10:01:24 -08:00
|
|
|
// register to accomplish this.
|
|
|
|
|
r := e.findRegFor(v.Type)
|
2017-07-21 18:30:19 -04:00
|
|
|
e.erase(r)
|
2017-07-21 12:00:38 -04:00
|
|
|
x = v.copyIntoWithXPos(e.p, pos)
|
2016-12-08 13:49:51 -08:00
|
|
|
e.set(r, vid, x, false, pos)
|
2016-01-04 13:34:54 -08:00
|
|
|
// Make sure we spill with the size of the slot, not the
|
|
|
|
|
// size of x (which might be wider due to our dropping
|
|
|
|
|
// of narrowing conversions).
|
2016-12-08 13:49:51 -08:00
|
|
|
x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, x)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// Emit move from src to dst.
|
|
|
|
|
_, srcReg := src.(*Register)
|
|
|
|
|
if srcReg {
|
|
|
|
|
if dstReg {
|
2016-12-08 13:49:51 -08:00
|
|
|
x = e.p.NewValue1(pos, OpCopy, c.Type, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
} else {
|
2016-12-08 13:49:51 -08:00
|
|
|
x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
if dstReg {
|
2016-12-08 13:49:51 -08:00
|
|
|
x = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
} else {
|
2016-03-01 23:21:55 +00:00
|
|
|
// mem->mem. Use temp register.
|
2015-12-17 10:01:24 -08:00
|
|
|
r := e.findRegFor(c.Type)
|
2017-07-21 18:30:19 -04:00
|
|
|
e.erase(r)
|
2016-12-08 13:49:51 -08:00
|
|
|
t := e.p.NewValue1(pos, OpLoadReg, c.Type, c)
|
|
|
|
|
e.set(r, vid, t, false, pos)
|
|
|
|
|
x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-12-08 13:49:51 -08:00
|
|
|
e.set(loc, vid, x, true, pos)
|
2018-05-25 16:08:13 -04:00
|
|
|
if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
|
|
|
|
|
e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
if splice != nil {
|
2016-03-15 20:45:50 -07:00
|
|
|
(*splice).Uses--
|
2015-12-17 10:01:24 -08:00
|
|
|
*splice = x
|
2016-03-15 20:45:50 -07:00
|
|
|
x.Uses++
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
return true
|
|
|
|
|
}
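// Hedged sketch, not part of this file: the shuffle cases the code above
// chooses between once a non-rematerializeable source c has been located.
// The returned strings stand in for the OpCopy/OpLoadReg/OpStoreReg values
// it actually builds; the function name is hypothetical.
func shuffleOps(srcInReg, dstInReg bool) []string {
	switch {
	case srcInReg && dstInReg:
		return []string{"Copy src -> dst"} // register-to-register move
	case srcInReg && !dstInReg:
		return []string{"StoreReg src -> dst"} // spill into the stack slot
	case !srcInReg && dstInReg:
		return []string{"LoadReg src -> dst"} // restore from the stack slot
	default:
		// Stack-to-stack has no direct instruction, so the code above grabs
		// a scratch register with findRegFor and bounces through it.
		return []string{"LoadReg src -> tmp", "StoreReg tmp -> dst"}
	}
}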
|
|
|
|
|
|
|
|
|
|
// set changes the contents of location loc to hold the given value and its cached representative.
|
2016-12-15 17:17:01 -08:00
|
|
|
func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) {
|
2015-12-17 10:01:24 -08:00
|
|
|
e.s.f.setHome(c, loc)
|
2016-12-08 13:49:51 -08:00
|
|
|
e.contents[loc] = contentRecord{vid, c, final, pos}
|
2015-12-17 10:01:24 -08:00
|
|
|
a := e.cache[vid]
|
2016-03-03 09:53:03 -08:00
|
|
|
if len(a) == 0 {
|
|
|
|
|
e.cachedVals = append(e.cachedVals, vid)
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
a = append(a, c)
|
|
|
|
|
e.cache[vid] = a
|
|
|
|
|
if r, ok := loc.(*Register); ok {
|
2020-04-14 10:12:32 +01:00
|
|
|
if e.usedRegs&(regMask(1)<<uint(r.num)) != 0 {
|
|
|
|
|
e.s.f.Fatalf("%v is already set (v%d/%v)", r, vid, c)
|
|
|
|
|
}
|
2016-09-16 09:36:00 -07:00
|
|
|
e.usedRegs |= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
if final {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.finalRegs |= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
if len(a) == 1 {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.uniqueRegs |= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
if len(a) == 2 {
|
|
|
|
|
if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.uniqueRegs &^= regMask(1) << uint(t.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
2018-03-16 07:15:59 -07:00
|
|
|
if e.s.values[vid].rematerializeable {
|
|
|
|
|
e.rematerializeableRegs |= regMask(1) << uint(r.num)
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Printf("%s\n", c.LongString())
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("v%d now available in %s:%s\n", vid, loc, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// erase removes any user of loc.
|
|
|
|
|
func (e *edgeState) erase(loc Location) {
|
|
|
|
|
cr := e.contents[loc]
|
|
|
|
|
if cr.c == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
vid := cr.vid
|
|
|
|
|
|
|
|
|
|
if cr.final {
|
|
|
|
|
// Add a destination to move this value back into place.
|
|
|
|
|
// Make sure it gets added to the tail of the destination queue
|
|
|
|
|
// so we make progress on other moves first.
|
2016-12-08 13:49:51 -08:00
|
|
|
e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.pos})
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remove c from the list of cached values.
|
|
|
|
|
a := e.cache[vid]
|
|
|
|
|
for i, c := range a {
|
|
|
|
|
if e.s.f.getHome(c.ID) == loc {
|
2016-03-10 17:52:57 -06:00
|
|
|
if e.s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
a[i], a = a[len(a)-1], a[:len(a)-1]
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
e.cache[vid] = a
|
|
|
|
|
|
|
|
|
|
// Update register masks.
|
|
|
|
|
if r, ok := loc.(*Register); ok {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.usedRegs &^= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
if cr.final {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.finalRegs &^= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2018-03-16 07:15:59 -07:00
|
|
|
e.rematerializeableRegs &^= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
if len(a) == 1 {
|
|
|
|
|
if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
|
2016-09-16 09:36:00 -07:00
|
|
|
e.uniqueRegs |= regMask(1) << uint(r.num)
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// findRegFor finds a register we can use to make a temp copy of type typ.
|
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.
In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.
Though this is a big CL, most of the changes are
mechanical and uninteresting.
Interesting bits:
* Add new singleton globals to package types for the special
SSA types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types,
and TTUPLE, for SSA tuple types.
ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt
to package types.
* We had picked the name "types" in our rules for the handy
list of types provided by ssa.Config. That conflicted with
the types package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem,
and probably also some other duplicated Type methods
designed to return ssa.Type instead of *types.Type.
* The ssa tests were using their own dummy types,
and they were not particularly careful about types in general.
Of necessity, this CL switches them to use *types.Type;
it does not make them more type-accurate.
Unfortunately, using types.Type means initializing a bit
of the types universe.
This is prime for refactoring and improvement.
This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact,
though, since most Values are preallocated in a chunk.
name old alloc/op new alloc/op delta
Template 37.9MB ± 0% 37.7MB ± 0% -0.57% (p=0.000 n=10+8)
Unicode 28.9MB ± 0% 28.7MB ± 0% -0.52% (p=0.000 n=10+10)
GoTypes 110MB ± 0% 109MB ± 0% -0.88% (p=0.000 n=10+10)
Flate 24.7MB ± 0% 24.6MB ± 0% -0.66% (p=0.000 n=10+10)
GoParser 31.1MB ± 0% 30.9MB ± 0% -0.61% (p=0.000 n=10+9)
Reflect 73.9MB ± 0% 73.4MB ± 0% -0.62% (p=0.000 n=10+8)
Tar 25.8MB ± 0% 25.6MB ± 0% -0.77% (p=0.000 n=9+10)
XML 41.2MB ± 0% 40.9MB ± 0% -0.80% (p=0.000 n=10+10)
[Geo mean] 40.5MB 40.3MB -0.68%
name old allocs/op new allocs/op delta
Template 385k ± 0% 386k ± 0% ~ (p=0.356 n=10+9)
Unicode 343k ± 1% 344k ± 0% ~ (p=0.481 n=10+10)
GoTypes 1.16M ± 0% 1.16M ± 0% -0.16% (p=0.004 n=10+10)
Flate 238k ± 1% 238k ± 1% ~ (p=0.853 n=10+10)
GoParser 320k ± 0% 320k ± 0% ~ (p=0.720 n=10+9)
Reflect 957k ± 0% 957k ± 0% ~ (p=0.460 n=10+8)
Tar 252k ± 0% 252k ± 0% ~ (p=0.133 n=9+10)
XML 400k ± 0% 400k ± 0% ~ (p=0.796 n=10+10)
[Geo mean] 428k 428k -0.01%
Removing all the interface calls helps non-trivially with CPU, though.
name old time/op new time/op delta
Template 178ms ± 4% 173ms ± 3% -2.90% (p=0.000 n=94+96)
Unicode 85.0ms ± 4% 83.9ms ± 4% -1.23% (p=0.000 n=96+96)
GoTypes 543ms ± 3% 528ms ± 3% -2.73% (p=0.000 n=98+96)
Flate 116ms ± 3% 113ms ± 4% -2.34% (p=0.000 n=96+99)
GoParser 144ms ± 3% 140ms ± 4% -2.80% (p=0.000 n=99+97)
Reflect 344ms ± 3% 334ms ± 4% -3.02% (p=0.000 n=100+99)
Tar 106ms ± 5% 103ms ± 4% -3.30% (p=0.000 n=98+94)
XML 198ms ± 5% 192ms ± 4% -2.88% (p=0.000 n=92+95)
[Geo mean] 178ms 173ms -2.65%
name old user-time/op new user-time/op delta
Template 229ms ± 5% 224ms ± 5% -2.36% (p=0.000 n=95+99)
Unicode 107ms ± 6% 106ms ± 5% -1.13% (p=0.001 n=93+95)
GoTypes 696ms ± 4% 679ms ± 4% -2.45% (p=0.000 n=97+99)
Flate 137ms ± 4% 134ms ± 5% -2.66% (p=0.000 n=99+96)
GoParser 176ms ± 5% 172ms ± 8% -2.27% (p=0.000 n=98+100)
Reflect 430ms ± 6% 411ms ± 5% -4.46% (p=0.000 n=100+92)
Tar 128ms ±13% 123ms ±13% -4.21% (p=0.000 n=100+100)
XML 239ms ± 6% 233ms ± 6% -2.50% (p=0.000 n=95+97)
[Geo mean] 220ms 213ms -2.76%
Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
2017-04-28 14:12:28 -07:00
|
|
|
func (e *edgeState) findRegFor(typ *types.Type) Location {
|
2015-12-17 10:01:24 -08:00
|
|
|
// Which registers are possibilities.
|
2017-03-17 16:04:46 -07:00
|
|
|
types := &e.s.f.Config.Types
|
2019-09-12 21:05:45 +02:00
|
|
|
m := e.s.compatRegs(typ)
|
2015-12-17 10:01:24 -08:00
|
|
|
|
2016-03-01 23:21:55 +00:00
|
|
|
// Pick a register. In priority order:
|
2015-12-17 10:01:24 -08:00
|
|
|
// 1) an unused register
|
|
|
|
|
// 2) a non-unique register not holding a final value
|
|
|
|
|
// 3) a non-unique register
|
2018-03-16 07:15:59 -07:00
|
|
|
// 4) a register holding a rematerializeable value
|
2015-12-17 10:01:24 -08:00
|
|
|
x := m &^ e.usedRegs
|
|
|
|
|
if x != 0 {
|
2016-03-21 22:57:26 -07:00
|
|
|
return &e.s.registers[pickReg(x)]
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
x = m &^ e.uniqueRegs &^ e.finalRegs
|
|
|
|
|
if x != 0 {
|
2016-03-21 22:57:26 -07:00
|
|
|
return &e.s.registers[pickReg(x)]
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
|
|
|
|
x = m &^ e.uniqueRegs
|
|
|
|
|
if x != 0 {
|
2016-03-21 22:57:26 -07:00
|
|
|
return &e.s.registers[pickReg(x)]
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2018-03-16 07:15:59 -07:00
|
|
|
x = m & e.rematerializeableRegs
|
|
|
|
|
if x != 0 {
|
|
|
|
|
return &e.s.registers[pickReg(x)]
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
|
2017-03-22 20:27:54 -07:00
|
|
|
// No register is available.
|
2015-12-17 10:01:24 -08:00
|
|
|
// Pick a register to spill.
|
2016-03-03 09:53:03 -08:00
|
|
|
for _, vid := range e.cachedVals {
|
|
|
|
|
a := e.cache[vid]
|
2015-12-17 10:01:24 -08:00
|
|
|
for _, c := range a {
|
2016-09-16 09:36:00 -07:00
|
|
|
if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 {
|
2017-03-14 18:21:23 -04:00
|
|
|
if !c.rematerializeable() {
|
|
|
|
|
x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c)
|
2017-03-22 20:27:54 -07:00
|
|
|
// Allocate a temp location to spill a register to.
|
|
|
|
|
// The type of the slot is immaterial - it will not be live across
|
|
|
|
|
// any safepoint. Just use a type big enough to hold any register.
|
2023-09-06 22:42:11 -07:00
|
|
|
t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64}
|
2017-07-21 18:28:06 -04:00
|
|
|
// TODO: reuse these slots. They'll need to be erased first.
|
2017-03-14 18:21:23 -04:00
|
|
|
e.set(t, vid, x, false, c.Pos)
|
|
|
|
|
if e.s.f.pass.debug > regDebug {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString())
|
2017-03-14 18:21:23 -04:00
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2016-03-01 23:21:55 +00:00
|
|
|
// r will now be overwritten by the caller. At some point
|
2015-12-17 10:01:24 -08:00
|
|
|
// later, the newly saved value will be moved back to its
|
|
|
|
|
// final destination in processDest.
|
|
|
|
|
return r
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-16 07:15:59 -07:00
|
|
|
fmt.Printf("m:%d unique:%d final:%d rematerializable:%d\n", m, e.uniqueRegs, e.finalRegs, e.rematerializeableRegs)
|
2016-03-03 09:53:03 -08:00
|
|
|
for _, vid := range e.cachedVals {
|
|
|
|
|
a := e.cache[vid]
|
2016-01-04 13:34:54 -08:00
|
|
|
for _, c := range a {
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID))
|
2016-01-04 13:34:54 -08:00
|
|
|
}
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
|
|
|
|
|
return nil
|
2015-05-05 16:19:12 -07:00
|
|
|
}
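// Hedged sketch, not part of this file: findRegFor's priority order written
// as plain mask arithmetic. The uint64 masks and helper names below are
// hypothetical stand-ins for regMask, pickReg, and the edgeState fields.
func pickTempReg(compat, used, unique, final, remat uint64) (reg int, ok bool) {
	lowestSet := func(m uint64) int {
		for i := 0; i < 64; i++ {
			if m>>uint(i)&1 != 0 {
				return i
			}
		}
		return -1
	}
	if x := compat &^ used; x != 0 { // 1) an unused register
		return lowestSet(x), true
	}
	if x := compat &^ unique &^ final; x != 0 { // 2) non-unique, not final
		return lowestSet(x), true
	}
	if x := compat &^ unique; x != 0 { // 3) non-unique
		return lowestSet(x), true
	}
	if x := compat & remat; x != 0 { // 4) holds a rematerializeable value
		return lowestSet(x), true
	}
	return 0, false // nothing usable; the real code then spills a cached value
}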
|
|
|
|
|
|
2016-07-13 16:15:54 -07:00
|
|
|
// rematerializeable reports whether the register allocator should recompute
|
|
|
|
|
// a value instead of spilling/restoring it.
|
2015-10-19 10:57:03 -07:00
|
|
|
func (v *Value) rematerializeable() bool {
|
2016-01-31 11:39:39 -08:00
|
|
|
if !opcodeTable[v.Op].rematerializeable {
|
2015-10-19 10:57:03 -07:00
|
|
|
return false
|
|
|
|
|
}
|
2016-01-31 11:39:39 -08:00
|
|
|
for _, a := range v.Args {
|
2024-11-24 15:29:56 -08:00
|
|
|
// Fixed-register allocations (SP, SB, etc.) are always available.
|
|
|
|
|
// Any other argument of an opcode makes it not rematerializeable.
|
|
|
|
|
if !opcodeTable[a.Op].fixedReg {
|
2016-01-31 11:39:39 -08:00
|
|
|
return false
|
|
|
|
|
}
|
2015-10-19 10:57:03 -07:00
|
|
|
}
|
2016-01-31 11:39:39 -08:00
|
|
|
return true
|
2015-10-19 10:57:03 -07:00
|
|
|
}
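// Hedged illustration, not from the source: what rematerialization buys.
// Instead of spilling and reloading a cheap value such as a constant,
//
//	v = MOVQconst [42] : AX
//	s = StoreReg v       // spill
//	...                  // AX gets reused for something else
//	c = LoadReg s : CX   // reload from the stack
//
// the allocator can simply recompute it at the point of use,
//
//	c = MOVQconst [42] : CX
//
// which is why cheap constant-like ops whose arguments (if any) live in
// fixed registers are marked rematerializeable in the opcode table.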
|
|
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
type liveInfo struct {
|
2016-12-15 17:17:01 -08:00
|
|
|
ID ID // ID of value
|
|
|
|
|
dist int32 // # of instructions before next use
|
|
|
|
|
pos src.XPos // source position of next use
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// computeLive computes a map from block ID to a list of value IDs live at the end
|
2016-03-01 23:21:55 +00:00
|
|
|
// of that block. Together with the value ID is a count of how many instructions
|
2016-04-15 12:49:30 -07:00
|
|
|
// to the next use of that value. The resulting map is stored in s.live.
|
|
|
|
|
// computeLive also computes the desired register information at the end of each block.
|
|
|
|
|
// This desired register information is stored in s.desired.
|
2015-05-05 16:19:12 -07:00
|
|
|
// TODO: this could be quadratic if lots of variables are live across lots of
|
2016-03-01 23:21:55 +00:00
|
|
|
// basic blocks. Figure out a way to make this function (or, more precisely, the user
|
2015-05-05 16:19:12 -07:00
|
|
|
// of this function) require only linear size & time.
|
2015-11-05 14:59:47 -08:00
|
|
|
func (s *regAllocState) computeLive() {
|
|
|
|
|
f := s.f
|
|
|
|
|
s.live = make([][]liveInfo, f.NumBlocks())
|
2016-04-15 12:49:30 -07:00
|
|
|
s.desired = make([]desiredState, f.NumBlocks())
|
2015-05-05 16:19:12 -07:00
|
|
|
var phis []*Value
|
|
|
|
|
|
2022-10-21 14:16:41 -07:00
|
|
|
live := f.newSparseMapPos(f.NumValues())
|
|
|
|
|
defer f.retSparseMapPos(live)
|
|
|
|
|
t := f.newSparseMapPos(f.NumValues())
|
|
|
|
|
defer f.retSparseMapPos(t)
|
2015-07-22 20:40:18 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Keep track of which value we want in each register.
|
|
|
|
|
var desired desiredState
|
|
|
|
|
|
2015-07-22 20:40:18 -07:00
|
|
|
// Instead of iterating over f.Blocks, iterate over their postordering.
|
|
|
|
|
// Liveness information flows backward, so starting at the end
|
|
|
|
|
// increases the probability that we will stabilize quickly.
|
|
|
|
|
// TODO: Do a better job yet. Here's one possibility:
|
|
|
|
|
// Calculate the dominator tree and locate all strongly connected components.
|
|
|
|
|
// If a value is live in one block of an SCC, it is live in all.
|
|
|
|
|
// Walk the dominator tree from end to beginning, just once, treating SCC
|
|
|
|
|
// components as single blocks, duplicating calculated liveness information
|
|
|
|
|
// out to all of them.
|
2016-09-16 13:50:18 -07:00
|
|
|
po := f.postorder()
|
|
|
|
|
s.loopnest = f.loopnest()
|
2025-06-10 14:37:47 -07:00
|
|
|
s.loopnest.computeUnavoidableCalls()
|
2015-05-05 16:19:12 -07:00
|
|
|
for {
|
|
|
|
|
changed := false
|
|
|
|
|
|
2015-07-22 20:40:18 -07:00
|
|
|
for _, b := range po {
|
2015-11-05 14:59:47 -08:00
|
|
|
// Start with known live values at the end of the block.
|
|
|
|
|
// Add len(b.Values) to adjust from end-of-block distance
|
|
|
|
|
// to beginning-of-block distance.
|
|
|
|
|
live.clear()
|
|
|
|
|
for _, e := range s.live[b.ID] {
|
2016-12-08 13:49:51 -08:00
|
|
|
live.set(e.ID, e.dist+int32(len(b.Values)), e.pos)
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
|
2019-08-12 20:19:58 +01:00
|
|
|
// Mark control values as live
|
|
|
|
|
for _, c := range b.ControlValues() {
|
|
|
|
|
if s.values[c.ID].needReg {
|
|
|
|
|
live.set(c.ID, int32(len(b.Values)), b.Pos)
|
|
|
|
|
}
|
2015-08-04 14:22:29 -07:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
|
|
|
|
// Propagate backwards to the start of the block
|
|
|
|
|
// Assumes Values have been scheduled.
|
2016-04-30 22:28:37 -07:00
|
|
|
phis = phis[:0]
|
2015-05-05 16:19:12 -07:00
|
|
|
for i := len(b.Values) - 1; i >= 0; i-- {
|
|
|
|
|
v := b.Values[i]
|
2015-11-05 14:59:47 -08:00
|
|
|
live.remove(v.ID)
|
2015-05-05 16:19:12 -07:00
|
|
|
if v.Op == OpPhi {
|
|
|
|
|
// save phi ops for later
|
|
|
|
|
phis = append(phis, v)
|
|
|
|
|
continue
|
|
|
|
|
}
|
2016-09-09 13:11:07 -07:00
|
|
|
if opcodeTable[v.Op].call {
|
|
|
|
|
c := live.contents()
|
|
|
|
|
for i := range c {
|
|
|
|
|
c[i].val += unlikelyDistance
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
for _, a := range v.Args {
|
2015-12-17 10:01:24 -08:00
|
|
|
if s.values[a.ID].needReg {
|
2016-12-07 18:14:35 -08:00
|
|
|
live.set(a.ID, int32(i), v.Pos)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
|
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
// Propagate desired registers backwards.
|
|
|
|
|
desired.copy(&s.desired[b.ID])
|
|
|
|
|
for i := len(b.Values) - 1; i >= 0; i-- {
|
|
|
|
|
v := b.Values[i]
|
|
|
|
|
prefs := desired.remove(v.ID)
|
|
|
|
|
if v.Op == OpPhi {
|
|
|
|
|
// TODO: if v is a phi, save desired register for phi inputs.
|
|
|
|
|
// For now, we just drop it and don't propagate
|
|
|
|
|
// desired registers back through phi nodes.
|
|
|
|
|
continue
|
|
|
|
|
}
|
2021-02-13 10:49:37 -05:00
|
|
|
regspec := s.regspec(v)
|
2016-04-15 12:49:30 -07:00
|
|
|
// Cancel desired registers if they get clobbered.
|
cmd/compile: don't lower OpConvert
Currently, each architecture lowers OpConvert to an arch-specific
OpXXXconvert. This is silly because OpConvert means the same thing on
all architectures and is logically a no-op that exists only to keep
track of conversions to and from unsafe.Pointer. Furthermore, lowering
it makes it harder to recognize in other analyses, particularly
liveness analysis.
This CL eliminates the lowering of OpConvert, leaving it as the
generic op until code generation time.
The main complexity here is that we still need to register-allocate
OpConvert operations. Currently, each arch's lowered OpConvert
specifies all GP registers in its register mask. Ideally, OpConvert
wouldn't affect value homing at all, and we could just copy the home
of OpConvert's source, but this can potentially home an OpConvert in a
LocalSlot, which neither regalloc nor stackalloc expect. Rather than
try to disentangle this assumption from regalloc and stackalloc, we
continue to register-allocate OpConvert, but teach regalloc that
OpConvert can be allocated to any allocatable GP register.
For #24543.
Change-Id: I795a6aee5fd94d4444a7bafac3838a400c9f7bb6
Reviewed-on: https://go-review.googlesource.com/108496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
2018-04-02 16:08:09 -04:00
|
|
|
desired.clobber(regspec.clobbers)
|
2016-04-15 12:49:30 -07:00
|
|
|
// Update desired registers if there are any fixed register inputs.
|
2018-04-02 16:08:09 -04:00
|
|
|
for _, j := range regspec.inputs {
|
2016-04-15 12:49:30 -07:00
|
|
|
if countRegs(j.regs) != 1 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
desired.clobber(j.regs)
|
|
|
|
|
desired.add(v.Args[j.idx].ID, pickReg(j.regs))
|
|
|
|
|
}
|
|
|
|
|
// Set desired register of input 0 if this is a 2-operand instruction.
|
2022-03-26 10:05:04 -07:00
|
|
|
if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 {
|
|
|
|
|
// ADDQconst is added here because we want to treat it as resultInArg0 for
|
|
|
|
|
// the purposes of desired registers, even though it is not an absolute requirement.
|
|
|
|
|
// This is because we'd rather implement it as ADDQ instead of LEAQ.
|
|
|
|
|
// Same for ADDLconst
|
|
|
|
|
// Select0 is added here to propagate the desired register to the tuple-generating instruction.
|
2016-04-15 12:49:30 -07:00
|
|
|
if opcodeTable[v.Op].commutative {
|
|
|
|
|
desired.addList(v.Args[1].ID, prefs)
|
|
|
|
|
}
|
|
|
|
|
desired.addList(v.Args[0].ID, prefs)
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
// For each predecessor of b, expand its list of live-at-end values.
|
|
|
|
|
// invariant: live contains the values live at the start of b (excluding phi inputs)
|
2016-04-28 16:52:47 -07:00
|
|
|
for i, e := range b.Preds {
|
|
|
|
|
p := e.b
|
2015-11-05 14:59:47 -08:00
|
|
|
// Compute additional distance for the edge.
|
|
|
|
|
// Note: delta must be at least 1 to distinguish the control
|
|
|
|
|
// value use from the first user in a successor block.
|
2016-03-02 15:18:40 -08:00
|
|
|
delta := int32(normalDistance)
|
2015-11-05 14:59:47 -08:00
|
|
|
if len(p.Succs) == 2 {
|
2016-04-28 16:52:47 -07:00
|
|
|
if p.Succs[0].b == b && p.Likely == BranchLikely ||
|
|
|
|
|
p.Succs[1].b == b && p.Likely == BranchUnlikely {
|
2016-03-02 15:18:40 -08:00
|
|
|
delta = likelyDistance
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2016-04-28 16:52:47 -07:00
|
|
|
if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
|
|
|
|
|
p.Succs[1].b == b && p.Likely == BranchLikely {
|
2016-03-02 15:18:40 -08:00
|
|
|
delta = unlikelyDistance
|
2015-08-11 12:51:33 -07:00
|
|
|
}
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// Update any desired registers at the end of p.
|
|
|
|
|
s.desired[p.ID].merge(&desired)
|
|
|
|
|
|
2015-11-05 14:59:47 -08:00
|
|
|
// Start t off with the previously known live values at the end of p.
|
2015-05-05 16:19:12 -07:00
|
|
|
t.clear()
|
2015-11-05 14:59:47 -08:00
|
|
|
for _, e := range s.live[p.ID] {
|
2016-12-08 13:49:51 -08:00
|
|
|
t.set(e.ID, e.dist, e.pos)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
|
|
|
|
update := false
|
|
|
|
|
|
|
|
|
|
// Add new live values from scanning this block.
|
|
|
|
|
for _, e := range live.contents() {
|
|
|
|
|
d := e.val + delta
|
|
|
|
|
if !t.contains(e.key) || d < t.get(e.key) {
|
|
|
|
|
update = true
|
2022-10-21 14:16:41 -07:00
|
|
|
t.set(e.key, d, e.pos)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Also add the correct arg from the saved phi values.
|
|
|
|
|
// All phis are at distance delta (we consider them
|
|
|
|
|
// simultaneously happening at the start of the block).
|
2015-05-05 16:19:12 -07:00
|
|
|
for _, v := range phis {
|
2015-11-05 14:59:47 -08:00
|
|
|
id := v.Args[i].ID
|
2016-04-15 12:49:30 -07:00
|
|
|
if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
|
2015-11-05 14:59:47 -08:00
|
|
|
update = true
|
2016-12-07 18:14:35 -08:00
|
|
|
t.set(id, delta, v.Pos)
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
|
|
|
|
|
if !update {
|
2015-05-05 16:19:12 -07:00
|
|
|
continue
|
|
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
// The live set has changed, update it.
|
|
|
|
|
l := s.live[p.ID][:0]
|
2016-02-10 17:43:31 -05:00
|
|
|
if cap(l) < t.size() {
|
|
|
|
|
l = make([]liveInfo, 0, t.size())
|
2016-01-30 17:37:38 -05:00
|
|
|
}
|
2015-11-05 14:59:47 -08:00
|
|
|
for _, e := range t.contents() {
|
2022-10-21 14:16:41 -07:00
|
|
|
l = append(l, liveInfo{e.key, e.val, e.pos})
|
2015-11-05 14:59:47 -08:00
|
|
|
}
|
|
|
|
|
s.live[p.ID] = l
|
2015-05-05 16:19:12 -07:00
|
|
|
changed = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if !changed {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-03-10 17:52:57 -06:00
|
|
|
if f.pass.debug > regDebug {
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Println("live values at end of each block")
|
|
|
|
|
for _, b := range f.Blocks {
|
|
|
|
|
fmt.Printf(" %s:", b)
|
|
|
|
|
for _, x := range s.live[b.ID] {
|
2020-10-19 03:57:15 +00:00
|
|
|
fmt.Printf(" v%d(%d)", x.ID, x.dist)
|
2016-04-15 12:49:30 -07:00
|
|
|
for _, e := range s.desired[b.ID].entries {
|
|
|
|
|
if e.ID != x.ID {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
fmt.Printf("[")
|
|
|
|
|
first := true
|
|
|
|
|
for _, r := range e.regs {
|
|
|
|
|
if r == noRegister {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if !first {
|
|
|
|
|
fmt.Printf(",")
|
|
|
|
|
}
|
2017-08-17 12:23:34 -07:00
|
|
|
fmt.Print(&s.registers[r])
|
2016-04-15 12:49:30 -07:00
|
|
|
first = false
|
|
|
|
|
}
|
|
|
|
|
fmt.Printf("]")
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
}
|
2018-04-17 09:09:07 -07:00
|
|
|
if avoid := s.desired[b.ID].avoid; avoid != 0 {
|
|
|
|
|
fmt.Printf(" avoid=%v", s.RegMaskString(avoid))
|
|
|
|
|
}
|
2015-12-17 10:01:24 -08:00
|
|
|
fmt.Println()
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-05-05 16:19:12 -07:00
|
|
|
}
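// Hedged sketch, not part of this file: the backward scan at the heart of
// computeLive, on a toy block. toyInstr and liveAtTop are hypothetical; the
// real code also handles phis, call penalties, and branch-likeliness deltas.
type toyInstr struct {
	def  int   // value defined by this instruction
	args []int // values it uses
}

// liveAtTop starts from the live-out set (distances measured past the block
// end), converts to distances from the block top, then walks the instructions
// in reverse: a definition kills liveness, a use records its instruction
// index as the distance to the next use.
func liveAtTop(block []toyInstr, liveOut map[int]int) map[int]int {
	live := make(map[int]int, len(liveOut))
	for id, d := range liveOut {
		live[id] = d + len(block)
	}
	for i := len(block) - 1; i >= 0; i-- {
		delete(live, block[i].def)
		for _, a := range block[i].args {
			live[a] = i // closer uses overwrite farther ones during the scan
		}
	}
	return live
}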
|
2015-10-22 13:07:38 -07:00
|
|
|
|
2016-04-15 12:49:30 -07:00
|
|
|
// A desiredState represents desired register assignments.
|
|
|
|
|
type desiredState struct {
|
|
|
|
|
// Desired assignments will be small, so we just use a list
|
|
|
|
|
// of valueID+registers entries.
|
|
|
|
|
entries []desiredStateEntry
|
|
|
|
|
// Registers that other values want to be in. This value will
|
|
|
|
|
// contain at least the union of the regs fields of entries, but
|
|
|
|
|
// may contain additional entries for values that were once in
|
|
|
|
|
// this data structure but are no longer.
|
|
|
|
|
avoid regMask
|
|
|
|
|
}
|
|
|
|
|
type desiredStateEntry struct {
|
|
|
|
|
// (pre-regalloc) value
|
|
|
|
|
ID ID
|
|
|
|
|
// Registers it would like to be in, in priority order.
|
|
|
|
|
// Unused slots are filled with noRegister.
|
2022-03-26 10:05:04 -07:00
|
|
|
// For opcodes that return tuples, we track desired registers only
|
2024-11-23 10:58:47 -08:00
|
|
|
// for the first element of the tuple (see desiredSecondReg for
|
|
|
|
|
// tracking the desired register for the second part of a tuple).
|
2016-04-15 12:49:30 -07:00
|
|
|
regs [4]register
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// get returns a list of desired registers for value vid.
|
|
|
|
|
func (d *desiredState) get(vid ID) [4]register {
|
|
|
|
|
for _, e := range d.entries {
|
|
|
|
|
if e.ID == vid {
|
|
|
|
|
return e.regs
|
|
|
|
|
}
|
2015-10-22 13:07:38 -07:00
|
|
|
}
|
2016-04-15 12:49:30 -07:00
|
|
|
return [4]register{noRegister, noRegister, noRegister, noRegister}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// add records that we'd like value vid to be in register r.
|
|
|
|
|
func (d *desiredState) add(vid ID, r register) {
|
|
|
|
|
d.avoid |= regMask(1) << r
|
|
|
|
|
for i := range d.entries {
|
|
|
|
|
e := &d.entries[i]
|
|
|
|
|
if e.ID != vid {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
if e.regs[0] == r {
|
|
|
|
|
// Already known and highest priority
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
for j := 1; j < len(e.regs); j++ {
|
|
|
|
|
if e.regs[j] == r {
|
|
|
|
|
// Move from lower priority to top priority
|
|
|
|
|
copy(e.regs[1:], e.regs[:j])
|
|
|
|
|
e.regs[0] = r
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
copy(e.regs[1:], e.regs[:])
|
|
|
|
|
e.regs[0] = r
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (d *desiredState) addList(vid ID, regs [4]register) {
|
|
|
|
|
// regs is in priority order, so iterate in reverse order.
|
|
|
|
|
for i := len(regs) - 1; i >= 0; i-- {
|
|
|
|
|
r := regs[i]
|
|
|
|
|
if r != noRegister {
|
|
|
|
|
d.add(vid, r)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// clobber erases any desired registers in the set m.
|
|
|
|
|
func (d *desiredState) clobber(m regMask) {
|
|
|
|
|
for i := 0; i < len(d.entries); {
|
|
|
|
|
e := &d.entries[i]
|
|
|
|
|
j := 0
|
|
|
|
|
for _, r := range e.regs {
|
|
|
|
|
if r != noRegister && m>>r&1 == 0 {
|
|
|
|
|
e.regs[j] = r
|
|
|
|
|
j++
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if j == 0 {
|
|
|
|
|
// No more desired registers for this value.
|
|
|
|
|
d.entries[i] = d.entries[len(d.entries)-1]
|
|
|
|
|
d.entries = d.entries[:len(d.entries)-1]
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
for ; j < len(e.regs); j++ {
|
|
|
|
|
e.regs[j] = noRegister
|
|
|
|
|
}
|
|
|
|
|
i++
|
|
|
|
|
}
|
|
|
|
|
d.avoid &^= m
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// copy copies a desired state from another desiredState x.
|
|
|
|
|
func (d *desiredState) copy(x *desiredState) {
|
|
|
|
|
d.entries = append(d.entries[:0], x.entries...)
|
|
|
|
|
d.avoid = x.avoid
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// remove removes the desired registers for vid and returns them.
|
|
|
|
|
func (d *desiredState) remove(vid ID) [4]register {
|
|
|
|
|
for i := range d.entries {
|
|
|
|
|
if d.entries[i].ID == vid {
|
|
|
|
|
regs := d.entries[i].regs
|
|
|
|
|
d.entries[i] = d.entries[len(d.entries)-1]
|
|
|
|
|
d.entries = d.entries[:len(d.entries)-1]
|
|
|
|
|
return regs
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return [4]register{noRegister, noRegister, noRegister, noRegister}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// merge merges another desired state x into d.
|
|
|
|
|
func (d *desiredState) merge(x *desiredState) {
|
|
|
|
|
d.avoid |= x.avoid
|
|
|
|
|
// There should only be a few desired registers, so
|
|
|
|
|
// linear insert is ok.
|
|
|
|
|
for _, e := range x.entries {
|
|
|
|
|
d.addList(e.ID, e.regs)
|
2015-10-22 13:07:38 -07:00
|
|
|
}
|
|
|
|
|
}
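// Hedged usage sketch, not part of this file: the priority behavior of
// desiredState.add on concrete register names. The string slice stands in
// for the fixed 4-slot regs array; exampleDesired is hypothetical.
func exampleDesired() []string {
	var prefs []string // highest priority first, capped at 4 entries
	add := func(r string) {
		newPrefs := []string{r} // newest request becomes top priority
		for _, x := range prefs {
			if x != r && len(newPrefs) < 4 {
				newPrefs = append(newPrefs, x)
			}
		}
		prefs = newPrefs
	}
	add("CX")
	add("AX")
	add("CX") // CX is promoted back to the front: ["CX", "AX"]
	return prefs
}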
|
2025-06-10 14:37:47 -07:00
|
|
|
|
|
|
|
|
// computeUnavoidableCalls computes the containsUnavoidableCall fields in the loop nest.
|
|
|
|
|
func (loopnest *loopnest) computeUnavoidableCalls() {
|
|
|
|
|
f := loopnest.f
|
|
|
|
|
|
|
|
|
|
hasCall := f.Cache.allocBoolSlice(f.NumBlocks())
|
|
|
|
|
defer f.Cache.freeBoolSlice(hasCall)
|
|
|
|
|
for _, b := range f.Blocks {
|
|
|
|
|
if b.containsCall() {
|
|
|
|
|
hasCall[b.ID] = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
found := f.Cache.allocSparseSet(f.NumBlocks())
|
|
|
|
|
defer f.Cache.freeSparseSet(found)
|
|
|
|
|
// Run a DFS to find a path through the loop that avoids all calls.
|
|
|
|
|
// Such a path either escapes the loop or returns to the header.
|
|
|
|
|
// It isn't enough to have an exit not dominated by any call, for example:
|
|
|
|
|
// ... some loop
|
|
|
|
|
// call1 call2
|
|
|
|
|
// \ /
|
|
|
|
|
// block
|
|
|
|
|
// ...
|
|
|
|
|
// block is not dominated by any single call, but we don't have a call-free path to it.
|
|
|
|
|
loopLoop:
|
|
|
|
|
for _, l := range loopnest.loops {
|
|
|
|
|
found.clear()
|
|
|
|
|
tovisit := make([]*Block, 0, 8)
|
|
|
|
|
tovisit = append(tovisit, l.header)
|
|
|
|
|
for len(tovisit) > 0 {
|
|
|
|
|
cur := tovisit[len(tovisit)-1]
|
|
|
|
|
tovisit = tovisit[:len(tovisit)-1]
|
|
|
|
|
if hasCall[cur.ID] {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
for _, s := range cur.Succs {
|
|
|
|
|
nb := s.Block()
|
|
|
|
|
if nb == l.header {
|
|
|
|
|
// Found a call-free path around the loop.
|
|
|
|
|
continue loopLoop
|
|
|
|
|
}
|
|
|
|
|
if found.contains(nb.ID) {
|
|
|
|
|
// Already found via another path.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
nl := loopnest.b2l[nb.ID]
|
|
|
|
|
if nl == nil || (nl.depth <= l.depth && nl != l) {
|
|
|
|
|
// Left the loop.
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
tovisit = append(tovisit, nb)
|
|
|
|
|
found.add(nb.ID)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// No call-free path was found.
|
|
|
|
|
l.containsUnavoidableCall = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (b *Block) containsCall() bool {
|
|
|
|
|
if b.Kind == BlockDefer {
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
for _, v := range b.Values {
|
|
|
|
|
if opcodeTable[v.Op].call {
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return false
|
|
|
|
|
}
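// Hedged sketch, not part of this file: the call-free-path search that
// computeUnavoidableCalls performs, on a toy adjacency-list CFG. All names
// are hypothetical; the real code also respects loop nesting depth.
func loopHasUnavoidableCall(succs map[int][]int, inLoop, hasCall map[int]bool, header int) bool {
	seen := map[int]bool{}
	stack := []int{header}
	for len(stack) > 0 {
		cur := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if hasCall[cur] {
			continue // every path through cur hits a call
		}
		for _, nb := range succs[cur] {
			if nb == header {
				return false // found a call-free path back around the loop
			}
			if seen[nb] || !inLoop[nb] {
				continue // already visited, or the path left the loop
			}
			seen[nb] = true
			stack = append(stack, nb)
		}
	}
	return true // no call-free cycle through the header exists
}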
|