cmd/compile: stack allocate backing stores during append
We can already stack allocate the backing store during append if the
resulting backing store doesn't escape. See CL 664299.
This CL enables us to often stack allocate the backing store during
append *even if* the result escapes. Typically, for code like:
	func f(n int) []int {
		var r []int
		for i := range n {
			r = append(r, i)
		}
		return r
	}
the backing store for r escapes, but only by returning it.
Could we operate with r on the stack for most of its lifetime,
and only move it to the heap at the return point?
The current implementation of append needs to allocate each time
it calls growslice. This happens on the 1st, 2nd, 4th, 8th, etc.
append calls. The allocations done by all but the last growslice
call immediately become garbage.
We'd like to avoid doing some of those intermediate allocations
if possible. We rewrite the above code by introducing a move2heap
operation:
	func f(n int) []int {
		var r []int
		for i := range n {
			r = append(r, i)
		}
		r = move2heap(r)
		return r
	}
Here move2heap is a runtime operation with the following behavior:
	move2heap(r):
		If r is already backed by heap storage, return r.
		Otherwise, copy r to the heap and return the copy.
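As a rough Go-level sketch of that contract (move2heapSketch and isStackBacked
are made-up names here; the real check, and the handling of the region between
len and cap, live in the runtime helpers):
	func move2heapSketch[T any](r []T, isStackBacked func(*T) bool) []T {
		if cap(r) == 0 || !isStackBacked(&r[:cap(r)][0]) {
			return r // nil or already heap-backed: nothing to do
		}
		// Copy the stack-backed contents to fresh, heap-backed storage,
		// preserving length and capacity.
		s := make([]T, len(r), cap(r))
		copy(s, r)
		return s
	}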
Now we can treat the backing store of r allocated at the
append site as not escaping. Previous stack allocation
optimizations now apply, which can use a fixed-size
stack-allocated backing store for r when appending.
See the description in cmd/compile/internal/slice/slice.go
for how we ensure that this optimization is safe.
Change-Id: I81f36e58bade2241d07f67967d8d547fff5302b8
Reviewed-on: https://go-review.googlesource.com/c/go/+/707755
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: David Chase <drchase@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>

// asmcheck

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

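// Append1: the backing store escapes only via the return value. The test expects
// growslice calls inside the loop and a single move to the heap (moveSliceNoCapNoScan)
// at the return.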
func Append1(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

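// Append2: same as Append1, but with a named result and a bare return.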
func Append2(n int) (r []int) {
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return
}

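// Append3: same as Append1, but with a named result that is returned explicitly.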
func Append3(n int) (r []int) {
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

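// Append4: cap(r) is observed after the loop; the test expects growsliceBuf
// in the loop and the moveSliceNoScan variant at the return.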
func Append4(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, i)
	}
	println(cap(r))
	// amd64:`.*moveSliceNoScan`
	return r
}

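// Append5: r is passed to a non-inlined function before the return; the move
// to the heap is still expected only at the return.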
func Append5(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, i)
	}
	useSlice(r)
	// amd64:`.*moveSliceNoScan`
	return r
}

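// Append6: the element type contains pointers, so the test expects the
// moveSliceNoCap variant rather than the NoScan form.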
func Append6(n int) []*int {
	var r []*int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, new(i))
	}
	// amd64:`.*moveSliceNoCap`
	return r
}

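// Append7: pointer elements combined with an observed cap(r); the test expects
// growsliceBuf in the loop and the general moveSlice call at the return.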
func Append7(n int) []*int {
	var r []*int
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, new(i))
	}
	println(cap(r))
	// amd64:`.*moveSlice`
	return r
}

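// Append8: the slice escapes by assignment through a pointer parameter rather
// than by a return; the move is expected just before that assignment.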
func Append8(n int, p *[]int) {
	var r []int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	*p = r
}

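// Append9: observing len(r) after the loop does not change the expected move variant.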
func Append9(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	println(len(r))
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

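// Append10: reading an element of r after the loop does not change the expected move variant.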
func Append10(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	println(r[3])
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

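// Append11: r is re-sliced before the return; the test expects growsliceBuf in
// the loop and the moveSliceNoScan variant at the return.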
func Append11(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, i)
	}
	r = r[3:5]
	// amd64:`.*moveSliceNoScan`
	return r
}

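// Append12: an explicit nil assignment before the loop still allows the optimization.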
func Append12(n int) []int {
	var r []int
	r = nil
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

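// Append13: a double assignment of nil to r still allows the optimization.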
func Append13(n int) []int {
	var r []int
	r, r = nil, nil
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

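// Append14: r starts from a slice literal; the test expects growsliceBuf in the
// loop and moveSliceNoScan at the return.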
func Append14(n int) []int {
	var r []int
	r = []int{3, 4, 5}
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoScan`
	return r
}

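// Append15: same as Append14, but r is declared with a short variable declaration.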
func Append15(n int) []int {
	r := []int{3, 4, 5}
	for i := range n {
		// amd64:`.*growsliceBuf`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoScan`
	return r
}

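// Append16: r comes in as a parameter; the move at the return is still expected.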
func Append16(r []int, n int) []int {
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

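// Append17: ranging over r after the loop does not prevent the move at the return.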
func Append17(n int) []int {
	var r []int
	for i := range n {
		// amd64:`.*growslice`
		r = append(r, i)
	}
	for i, x := range r {
		println(i, x)
	}
	// amd64:`.*moveSliceNoCapNoScan`
	return r
}

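// Append18: r escapes inside the loop (through *p) before each append, so no
// move call is expected at that assignment.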
func Append18(n int, p *[]int) {
	var r []int
	for i := range n {
		// amd64:-`.*moveSliceNoCapNoScan`
		*p = r
		// amd64:`.*growslice`
		r = append(r, i)
	}
}

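// Append19: a fresh r per outer iteration escapes into p[j]; the move is
// expected at that assignment.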
func Append19(n int, p [][]int) {
	for j := range p {
		var r []int
		for i := range n {
			// amd64:`.*growslice`
			r = append(r, i)
		}
		// amd64:`.*moveSliceNoCapNoScan`
		p[j] = r
	}
}

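// Append20: a single append with no loop; no move call is expected before the
// escaping assignment.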
func Append20(n int, p [][]int) {
	for j := range p {
		var r []int
		// amd64:`.*growslice`
		r = append(r, 0)
		// amd64:-`.*moveSliceNoCapNoScan`
		p[j] = r
	}
}

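// useSlice is a no-op sink; //go:noinline keeps the call in Append5 from being
// inlined away, so it remains a real use of the slice.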
//go:noinline
func useSlice(s []int) {
}