// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)
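
// slice is the runtime representation of a Go slice: a pointer to the
// backing array plus the current length and capacity. For example (a
// sketch, not code from this file), after
//
//	s := make([]int, 2, 5)
//
// the header is {array: <allocation>, len: 2, cap: 5}, and reslicing to
// s[:4] reuses the same array with len 4.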
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by go:notinheap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
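//
// As a sketch of where this comes from (an assumption about the caller,
// not guaranteed by this file), the compiler may lower a make-and-copy
// pattern such as
//
//	s := make([]T, len(from))
//	copy(s, from)
//
// into a single makeslicecopy call, so the destination is not first
// cleared and then immediately overwritten.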
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.size * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than
		// tolen, so tolen is a good slice length too, since the from and to
		// slices have the same element width.
		tomem = et.size * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.ptrdata == 0 {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in from since we know the destination
			// slice contains only nil pointers: it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.size, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
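		// For example (illustrative values on a 64-bit system),
		// make([]byte, 1<<62) fails here with 'len out of range'
		// rather than 'cap out of range'.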
		mem, overflow := math.MulUintptr(et.size, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}
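
// makeslice64 is used when the compiler cannot prove that the make
// arguments fit in an int (for example, int64 arguments on a 32-bit
// platform); it checks that they do before delegating to makeslice.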
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}
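
// unsafeslice and unsafeslice64 (below) perform the length validation
// for unsafe.Slice. As a sketch, unsafe.Slice(p, n) panics here when n
// is negative or when n elements of type et would exceed maxAlloc bytes.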
func unsafeslice(et *_type, len int) {
	mem, overflow := math.MulUintptr(et.size, uintptr(len))
	if overflow || mem > maxAlloc || len < 0 {
		panicunsafeslicelen()
	}
}

func unsafeslice64(et *_type, len64 int64) {
	len := int(len64)
	if int64(len) != len64 {
		panicunsafeslicelen()
	}
	unsafeslice(et, len)
}

func panicunsafeslicelen() {
	panic(errorString("unsafe.Slice: len out of range"))
}

// growslice handles slice growth during append.
// It is passed the slice element type, the old slice, and the desired new minimum capacity,
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the old slice's length,
// NOT to the new requested capacity.
// This is for codegen convenience. The old slice's length is used immediately
// to calculate where to write new values during an append.
// TODO: When the old backend is gone, reconsider this decision.
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
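//
// As a sketch (the compiler's actual expansion varies), an append like
//
//	s = append(s, e)
//
// is lowered roughly to
//
//	if len(s)+1 > cap(s) {
//		s = growslice(T, s, len(s)+1) // len unchanged, cap grown
//	}
//	s = s[:len(s)+1]
//	s[len(s)-1] = e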
func growslice(et *_type, old slice, cap int) slice {
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(old.array, uintptr(old.len*int(et.size)))
	}

	if cap < old.cap {
		panic(errorString("growslice: cap out of range"))
	}

	if et.size == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), old.len, cap}
	}

	newcap := old.cap
	doublecap := newcap + newcap
	if cap > doublecap {
		newcap = cap
	} else {
		if old.cap < 1024 {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < cap {
				newcap += newcap / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = cap
			}
		}
	}
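
	// For example (before the roundupsize adjustment below): growing
	// from cap 4 to hold 5 elements doubles to newcap 8, while growing
	// from cap 2000 to hold 2001 steps up by 25% to newcap 2500.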
	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	switch {
	case et.size == 1:
		lenmem = uintptr(old.len)
		newlenmem = uintptr(cap)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.size == goarch.PtrSize:
		lenmem = uintptr(old.len) * goarch.PtrSize
		newlenmem = uintptr(cap) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.size):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
		} else {
			shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
		}
		lenmem = uintptr(old.len) << shift
		newlenmem = uintptr(cap) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
	default:
		lenmem = uintptr(old.len) * et.size
		newlenmem = uintptr(cap) * et.size
		capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.size)
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: cap out of range"))
	}

	var p unsafe.Pointer
	if et.ptrdata == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
		// Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
		}
	}
	memmove(p, old.array, lenmem)

	return slice{p, old.len, newcap}
}
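
// isPowerOfTwo reports whether x is a power of two. Note that the bit
// trick below also returns true for x == 0; growslice never passes 0,
// since it returns early when et.size == 0.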
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
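//
// As a sketch (an assumption about the caller, not guaranteed here),
// the compiler may lower
//
//	n := copy(dst, src) // pointerless element type, or src a string
//
// to a slicecopy call; the result is min(len(src), len(dst)).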
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}