go/src/cmd/compile/internal/ssa/schedule.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import "container/heap"
const (
ScorePhi = iota // towards top of block
ScoreNilCheck
ScoreReadTuple
ScoreVarDef
ScoreMemory
ScoreDefault
ScoreFlags
ScoreControl // towards bottom of block
)
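// A ValHeap is a heap.Interface over *Value, ordered by the per-value
// scores computed in schedule. Values that should appear later in the
// block pop first, because the schedule is assembled back to front.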
type ValHeap struct {
a []*Value
score []int8
}
func (h ValHeap) Len() int { return len(h.a) }
func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
func (h *ValHeap) Push(x interface{}) {
// Push and Pop use pointer receivers because they modify the slice's length,
// not just its contents.
v := x.(*Value)
h.a = append(h.a, v)
}
func (h *ValHeap) Pop() interface{} {
old := h.a
n := len(old)
x := old[n-1]
h.a = old[0 : n-1]
return x
}
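// Less reports whether the value at index i should pop before the value
// at index j: higher score first, then later source position, then fewer
// arguments (for non-phis), then higher ID. Because the schedule is built
// in reverse, popping first means appearing later in the block.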
func (h ValHeap) Less(i, j int) bool {
x := h.a[i]
y := h.a[j]
sx := h.score[x.ID]
sy := h.score[y.ID]
if c := sx - sy; c != 0 {
return c > 0 // higher score comes later.
}
if x.Pos != y.Pos { // Favor in-order line stepping
return x.Pos.After(y.Pos)
}
if x.Op != OpPhi {
if c := len(x.Args) - len(y.Args); c != 0 {
return c < 0 // fewer args come later
}
}
return x.ID > y.ID
}
// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a
// reasonable valid schedule using a priority queue. TODO(khr):
// schedule smarter.
func schedule(f *Func) {
// For each value, the number of times it is used in the block
// by values that have not been scheduled yet.
uses := make([]int32, f.NumValues())
// reusable priority queue
priq := new(ValHeap)
// "priority" for a value
score := make([]int8, f.NumValues())
// scheduling order. We queue values in this list in reverse order.
// A constant bound allows this to be stack-allocated. 64 is
// enough to cover almost every schedule call.
order := make([]*Value, 0, 64)
// maps mem values to the next live memory value
nextMem := make([]*Value, f.NumValues())
// additional pretend arguments for each Value. Used to enforce load/store ordering.
additionalArgs := make([][]*Value, f.NumValues())
for _, b := range f.Blocks {
// Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr ||
v.Op == OpWasmLoweredGetClosurePtr:
// We also score LoweredGetClosurePtr as early as possible to ensure that the
// context register is not stomped. LoweredGetClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
// conflict or ambiguity here.
if b != f.Entry {
f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
}
score[v.ID] = ScorePhi
case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
v.Op == OpWasmLoweredNilCheck:
// Nil checks must come before loads from the same address.
score[v.ID] = ScoreNilCheck
case v.Op == OpPhi:
// We want all the phis first.
score[v.ID] = ScorePhi
case v.Op == OpVarDef:
// We want all the vardefs next.
score[v.ID] = ScoreVarDef
case v.Type.IsMemory():
// Schedule stores as early as possible. This tends to
// reduce register pressure. It also helps make sure
// VARDEF ops are scheduled before the corresponding LEA.
score[v.ID] = ScoreMemory
case v.Op == OpSelect0 || v.Op == OpSelect1:
// Schedule the pseudo-op of reading part of a tuple
// immediately after the tuple-generating op, since
// this value is already live. This also removes its
// false dependency on the other part of the tuple.
// Also ensures tuple is never spilled.
score[v.ID] = ScoreReadTuple
case v.Type.IsFlags() || v.Type.IsTuple():
// Schedule flag register generation as late as possible.
// This makes sure that we only have one live flags
// value at a time.
score[v.ID] = ScoreFlags
default:
score[v.ID] = ScoreDefault
}
}
}
for _, b := range f.Blocks {
// Find store chain for block.
// Store chains for different blocks overwrite each other, so
// the calculated store chain is good only for this block.
for _, v := range b.Values {
if v.Op != OpPhi && v.Type.IsMemory() {
for _, w := range v.Args {
if w.Type.IsMemory() {
nextMem[w.ID] = v
}
}
}
}
// Compute uses.
for _, v := range b.Values {
if v.Op == OpPhi {
// If a value is used by a phi, it does not induce
// a scheduling edge because that use is from the
// previous iteration.
continue
}
for _, w := range v.Args {
if w.Block == b {
uses[w.ID]++
}
// Any load must come before the following store.
if !v.Type.IsMemory() && w.Type.IsMemory() {
// v is a load.
s := nextMem[w.ID]
if s == nil || s.Block != b {
continue
}
additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
uses[v.ID]++
}
}
}
if b.Control != nil && b.Control.Op != OpPhi {
// Force the control value to be scheduled at the end,
// unless it is a phi value (which must be first).
score[b.Control.ID] = ScoreControl
// Schedule values dependent on the control value at the end.
// This reduces the number of register spills. We don't find
// all values that depend on the control, just values with a
// direct dependency. This is cheaper and in testing there
// was no difference in the number of spills.
for _, v := range b.Values {
if v.Op != OpPhi {
for _, a := range v.Args {
if a == b.Control {
score[v.ID] = ScoreControl
}
}
}
}
}
// To put things into a priority queue,
// the values that should come last must compare as least.
priq.score = score
priq.a = priq.a[:0]
// Initialize priority queue with schedulable values.
for _, v := range b.Values {
if uses[v.ID] == 0 {
heap.Push(priq, v)
}
}
// Schedule highest priority value, update use counts, repeat.
order = order[:0]
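// tuples buffers tuple-reading ops (Select0/Select1) until their
// tuple-generating op is popped, so each read is emitted immediately
// after the op that produces the tuple.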
tuples := make(map[ID][]*Value)
for {
// Find highest priority schedulable value.
// Note that schedule is assembled backwards.
if priq.Len() == 0 {
break
}
v := heap.Pop(priq).(*Value)
// Add it to the schedule.
// Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
// TODO: maybe remove the ReadTuple score above if it does not help performance.
switch {
case v.Op == OpSelect0:
if tuples[v.Args[0].ID] == nil {
tuples[v.Args[0].ID] = make([]*Value, 2)
}
tuples[v.Args[0].ID][0] = v
case v.Op == OpSelect1:
if tuples[v.Args[0].ID] == nil {
tuples[v.Args[0].ID] = make([]*Value, 2)
}
tuples[v.Args[0].ID][1] = v
case v.Type.IsTuple() && tuples[v.ID] != nil:
if tuples[v.ID][1] != nil {
order = append(order, tuples[v.ID][1])
}
if tuples[v.ID][0] != nil {
order = append(order, tuples[v.ID][0])
}
delete(tuples, v.ID)
fallthrough
default:
order = append(order, v)
}
// Update use counts of arguments.
for _, w := range v.Args {
if w.Block != b {
continue
}
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
heap.Push(priq, w)
}
}
for _, w := range additionalArgs[v.ID] {
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
heap.Push(priq, w)
}
}
}
if len(order) != len(b.Values) {
f.Fatalf("schedule does not include all values in block %s", b)
}
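		// The order slice was assembled from the bottom of the block up,
		// so copy it into b.Values in reverse.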
for i := 0; i < len(b.Values); i++ {
b.Values[i] = order[len(b.Values)-1-i]
}
}
f.scheduled = true
}
// storeOrder orders values with respect to stores. That is,
// if v transitively depends on store s, v is ordered after s,
// otherwise v is ordered before s.
// Specifically, values are ordered like
// store1
// NilCheck that depends on store1
// other values that depend on store1
// store2
// NilCheck that depends on store2
// other values that depend on store2
// ...
// The order of non-store and non-NilCheck values is undefined
// (not necessarily dependency order). This should be cheaper
// than a full scheduling as done above.
// Note that simple dependency order won't work: there is no
// dependency between NilChecks and values like IsNonNil.
// Auxiliary data structures are passed in as arguments, so
// that they can be allocated in the caller and be reused.
// This function takes care of resetting them.
func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value {
if len(values) == 0 {
return values
}
f := values[0].Block.Func
// find all stores
// Members of values that are store values.
// A constant bound allows this to be stack-allocated. 64 is
// enough to cover almost every storeOrder call.
stores := make([]*Value, 0, 64)
hasNilCheck := false
	sset.clear() // sset is the set of stores that are used as the memory argument of another store
for _, v := range values {
if v.Type.IsMemory() {
stores = append(stores, v)
if v.Op == OpInitMem || v.Op == OpPhi {
continue
}
sset.add(v.MemoryArg().ID) // record that v's memory arg is used
}
if v.Op == OpNilCheck {
hasNilCheck = true
}
}
if len(stores) == 0 || !hasNilCheck && f.pass.name == "nilcheckelim" {
		// There are no stores (or this is the nilcheckelim pass and there
		// are no nil checks to order), so the order does not matter.
return values
}
// find last store, which is the one that is not used by other stores
var last *Value
for _, v := range stores {
if !sset.contains(v.ID) {
if last != nil {
f.Fatalf("two stores live simultaneously: %v and %v", v, last)
}
last = v
}
}
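	// last is the tail of the block's store chain; walking MemoryArg back
	// from it visits every store in the block (checked by the loop below).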
// We assign a store number to each value. Store number is the
	// index of the latest store that this value transitively depends on.
	// The i-th store in the current block gets store number 3*i. A nil
	// check that depends on the i-th store gets store number 3*i+1.
	// Other values that depend on the i-th store get store number 3*i+2.
	// Special case: 0 -- unassigned; 1 or 2 -- the latest store it depends on
	// is in the previous block (or there is no store at all, e.g. the value is a Const).
// First we assign the number to all stores by walking back the store chain,
// then assign the number to other values in DFS order.
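	// For example, the first store in the chain gets number 3, a nil check
	// that depends on it gets 4, and other values that depend on it get 5;
	// the second store gets 6, and so on. Values with no store dependency
	// in this block get 1 or 2.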
count := make([]int32, 3*(len(stores)+1))
sset.clear() // reuse sparse set to ensure that a value is pushed to stack only once
for n, w := len(stores), last; n > 0; n-- {
storeNumber[w.ID] = int32(3 * n)
count[3*n]++
sset.add(w.ID)
if w.Op == OpInitMem || w.Op == OpPhi {
if n != 1 {
f.Fatalf("store order is wrong: there are stores before %v", w)
}
break
}
w = w.MemoryArg()
}
var stack []*Value
for _, v := range values {
if sset.contains(v.ID) {
			// Being in sset means v is a store, has already been pushed to the stack, or has already been assigned a store number.
continue
}
stack = append(stack, v)
sset.add(v.ID)
for len(stack) > 0 {
w := stack[len(stack)-1]
if storeNumber[w.ID] != 0 {
stack = stack[:len(stack)-1]
continue
}
if w.Op == OpPhi {
				// A Phi value does not depend on any store in the current block.
				// Handle it early to avoid a dependency cycle.
storeNumber[w.ID] = 2
count[2]++
stack = stack[:len(stack)-1]
continue
}
max := int32(0) // latest store dependency
argsdone := true
for _, a := range w.Args {
if a.Block != w.Block {
continue
}
if !sset.contains(a.ID) {
stack = append(stack, a)
sset.add(a.ID)
argsdone = false
break
}
if storeNumber[a.ID]/3 > max {
max = storeNumber[a.ID] / 3
}
}
if !argsdone {
continue
}
n := 3*max + 2
if w.Op == OpNilCheck {
n = 3*max + 1
}
storeNumber[w.ID] = n
count[n]++
stack = stack[:len(stack)-1]
}
}
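	// count[i] now holds the number of values that were assigned store number i.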
	// convert count to prefix sum of counts: count'[i] = sum_{j<=i} count[j]
for i := range count {
if i == 0 {
continue
}
count[i] += count[i-1]
}
if count[len(count)-1] != int32(len(values)) {
f.Fatalf("storeOrder: value is missing, total count = %d, values = %v", count[len(count)-1], values)
}
// place values in count-indexed bins, which are in the desired store order
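	// After the prefix sum above, count[s-1] is the index of the first free
	// slot for values with store number s; it is incremented as each value
	// is placed into its bin.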
order := make([]*Value, len(values))
for _, v := range values {
s := storeNumber[v.ID]
order[count[s-1]] = v
count[s-1]++
}
return order
}