// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}
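
// For example, after
//
//	s := make([]int, 2, 5)
//
// the header of s is conceptually {array: pointer to a 5-element block, len: 2, cap: 5};
// appending within the existing capacity changes only len.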

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length equal to or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if !et.Pointers() {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in from since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}
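
// The compiler is assumed to emit makeslicecopy for the common make-then-copy
// pattern, roughly:
//
//	m := make([]T, n)
//	copy(m, s)
//
// so the allocation, the clearing of the uncopied tail, and the copy happen in
// one call (a sketch of the lowering, not its exact rules).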

// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}
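
// For example, make([]int64, 3, 10) reaches the runtime as roughly
// makeslice(int64-type, 3, 10): the capacity in bytes (10*8) is checked
// against maxAlloc, the block is allocated zeroed, and the caller wraps the
// returned pointer in the slice header {ptr, 3, 10}.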

func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}
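
// makeslice64 is presumably used when the compiler cannot prove that the
// requested len and cap fit in an int (for example, 64-bit size expressions
// on 32-bit platforms); it only validates the narrowing and defers to makeslice.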

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity.
//	num = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
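//
// For example (a sketch of the convention, not the exact generated code):
// appending two elements to a slice s with len 3 and cap 4 produces a call
// roughly like
//
//	p, newLen, newCap := growslice(s.ptr, 3+2, 4, 2, T) // newLen == 5, newCap >= 5
//
// after which the caller stores the two new elements at indexes 3 and 4.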
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}
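
	// Note (illustrative, assuming the usual malloc size classes): roundupsize
	// reports the size of the block mallocgc would actually return, so the
	// final newcap can exceed what nextslicecap suggested. For example, growing
	// a []byte to a requested cap of 9 would typically round up to a 16-byte
	// block and report newcap == 16.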

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}

// nextslicecap computes the next appropriate slice capacity.
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence
		// when newcap overflows then `uint(newcap) > uint(newLen)`.
		// This allows checking for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}
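
// For example, working the formula above by hand: growing a slice whose cap is
// 256 by one element skips both early returns (257 <= 512, and 256 is not
// below the threshold), and a single loop iteration computes
//
//	newcap = 256 + (256+3*256)>>2 = 512
//
// so the capacity jumps from 256 to 512. For much larger capacities the
// increment (newcap+768)>>2 approaches newcap/4, i.e. roughly 1.25x growth.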

//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}
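
// isPowerOfTwo reports whether x is a power of two. Note that it also reports
// true for 0; that is harmless here because growslice returns early for
// zero-size element types before reaching the power-of-two case.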
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}
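
// For example (an illustrative sketch of the lowering, not the compiler's
// exact rules), a call like
//
//	n := copy(dst, src) // dst []byte, src string
//
// can compile down to slicecopy(dst.ptr, len(dst), src.ptr, len(src), 1) and
// returns the smaller of the two lengths; slices whose elements contain
// pointers go through typedslicecopy instead so that write barriers run.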

//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
}
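
// Note on bytealg_MakeNoZero: the backing memory is deliberately not zeroed
// (mallocgc is called with needzero=false), so the returned bytes are
// uninitialized; the internal/bytealg caller is presumably expected to
// overwrite every byte before the slice is exposed.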