mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
internal/abi: refactor (basic) type struct into one definition
This touches a lot of files, which is bad, but it is also good, since there's N copies of this information commoned into 1. The new files in internal/abi are copied from the end of the stack; ultimately this will all end up being used. Change-Id: Ia252c0055aaa72ca569411ef9f9e96e3d610889e Reviewed-on: https://go-review.googlesource.com/c/go/+/462995 TryBot-Result: Gopher Robot <gobot@golang.org> Reviewed-by: Carlos Amedee <carlos@golang.org> Run-TryBot: David Chase <drchase@google.com> Reviewed-by: Keith Randall <khr@golang.org>
This commit is contained in:
parent
dace96b9a1
commit
bdc6ae579a
46 changed files with 1479 additions and 711 deletions
|
|
@ -39,21 +39,21 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
|
|||
var tomem, copymem uintptr
|
||||
if uintptr(tolen) > uintptr(fromlen) {
|
||||
var overflow bool
|
||||
tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
|
||||
tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
|
||||
if overflow || tomem > maxAlloc || tolen < 0 {
|
||||
panicmakeslicelen()
|
||||
}
|
||||
copymem = et.size * uintptr(fromlen)
|
||||
copymem = et.Size_ * uintptr(fromlen)
|
||||
} else {
|
||||
// fromlen is a known good length providing an equal or greater length than tolen,
|
||||
// thereby making tolen a good slice length too as from and to slices have the
|
||||
// same element width.
|
||||
tomem = et.size * uintptr(tolen)
|
||||
tomem = et.Size_ * uintptr(tolen)
|
||||
copymem = tomem
|
||||
}
|
||||
|
||||
var to unsafe.Pointer
|
||||
if et.ptrdata == 0 {
|
||||
if et.PtrBytes == 0 {
|
||||
to = mallocgc(tomem, nil, false)
|
||||
if copymem < tomem {
|
||||
memclrNoHeapPointers(add(to, copymem), tomem-copymem)
|
||||
|
|
@ -86,14 +86,14 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
|
|||
}
|
||||
|
||||
func makeslice(et *_type, len, cap int) unsafe.Pointer {
|
||||
mem, overflow := math.MulUintptr(et.size, uintptr(cap))
|
||||
mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
|
||||
if overflow || mem > maxAlloc || len < 0 || len > cap {
|
||||
// NOTE: Produce a 'len out of range' error instead of a
|
||||
// 'cap out of range' error when someone does make([]T, bignumber).
|
||||
// 'cap out of range' is true too, but since the cap is only being
|
||||
// supplied implicitly, saying len is clearer.
|
||||
// See golang.org/issue/4085.
|
||||
mem, overflow := math.MulUintptr(et.size, uintptr(len))
|
||||
mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
|
||||
if overflow || mem > maxAlloc || len < 0 {
|
||||
panicmakeslicelen()
|
||||
}
|
||||
|
|
@ -158,20 +158,20 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
|
|||
oldLen := newLen - num
|
||||
if raceenabled {
|
||||
callerpc := getcallerpc()
|
||||
racereadrangepc(oldPtr, uintptr(oldLen*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
|
||||
racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
|
||||
}
|
||||
if msanenabled {
|
||||
msanread(oldPtr, uintptr(oldLen*int(et.size)))
|
||||
msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
|
||||
}
|
||||
if asanenabled {
|
||||
asanread(oldPtr, uintptr(oldLen*int(et.size)))
|
||||
asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
|
||||
}
|
||||
|
||||
if newLen < 0 {
|
||||
panic(errorString("growslice: len out of range"))
|
||||
}
|
||||
|
||||
if et.size == 0 {
|
||||
if et.Size_ == 0 {
|
||||
// append should not create a slice with nil pointer but non-zero len.
|
||||
// We assume that append doesn't need to preserve oldPtr in this case.
|
||||
return slice{unsafe.Pointer(&zerobase), newLen, newLen}
|
||||
|
|
@ -204,30 +204,30 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
|
|||
|
||||
var overflow bool
|
||||
var lenmem, newlenmem, capmem uintptr
|
||||
// Specialize for common values of et.size.
|
||||
// Specialize for common values of et.Size.
|
||||
// For 1 we don't need any division/multiplication.
|
||||
// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
|
||||
// For powers of 2, use a variable shift.
|
||||
switch {
|
||||
case et.size == 1:
|
||||
case et.Size_ == 1:
|
||||
lenmem = uintptr(oldLen)
|
||||
newlenmem = uintptr(newLen)
|
||||
capmem = roundupsize(uintptr(newcap))
|
||||
overflow = uintptr(newcap) > maxAlloc
|
||||
newcap = int(capmem)
|
||||
case et.size == goarch.PtrSize:
|
||||
case et.Size_ == goarch.PtrSize:
|
||||
lenmem = uintptr(oldLen) * goarch.PtrSize
|
||||
newlenmem = uintptr(newLen) * goarch.PtrSize
|
||||
capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
|
||||
overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
|
||||
newcap = int(capmem / goarch.PtrSize)
|
||||
case isPowerOfTwo(et.size):
|
||||
case isPowerOfTwo(et.Size_):
|
||||
var shift uintptr
|
||||
if goarch.PtrSize == 8 {
|
||||
// Mask shift for better code generation.
|
||||
shift = uintptr(sys.TrailingZeros64(uint64(et.size))) & 63
|
||||
shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
|
||||
} else {
|
||||
shift = uintptr(sys.TrailingZeros32(uint32(et.size))) & 31
|
||||
shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
|
||||
}
|
||||
lenmem = uintptr(oldLen) << shift
|
||||
newlenmem = uintptr(newLen) << shift
|
||||
|
|
@ -236,12 +236,12 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
|
|||
newcap = int(capmem >> shift)
|
||||
capmem = uintptr(newcap) << shift
|
||||
default:
|
||||
lenmem = uintptr(oldLen) * et.size
|
||||
newlenmem = uintptr(newLen) * et.size
|
||||
capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
|
||||
lenmem = uintptr(oldLen) * et.Size_
|
||||
newlenmem = uintptr(newLen) * et.Size_
|
||||
capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
|
||||
capmem = roundupsize(capmem)
|
||||
newcap = int(capmem / et.size)
|
||||
capmem = uintptr(newcap) * et.size
|
||||
newcap = int(capmem / et.Size_)
|
||||
capmem = uintptr(newcap) * et.Size_
|
||||
}
|
||||
|
||||
// The check of overflow in addition to capmem > maxAlloc is needed
|
||||
|
|
@ -262,7 +262,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
|
|||
}
|
||||
|
||||
var p unsafe.Pointer
|
||||
if et.ptrdata == 0 {
|
||||
if et.PtrBytes == 0 {
|
||||
p = mallocgc(capmem, nil, false)
|
||||
// The append() that calls growslice is going to overwrite from oldLen to newLen.
|
||||
// Only clear the part that will not be overwritten.
|
||||
|
|
@ -275,7 +275,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
|
|||
if lenmem > 0 && writeBarrier.enabled {
|
||||
// Only shade the pointers in oldPtr since we know the destination slice p
|
||||
// only contains nil pointers because it has been cleared during alloc.
|
||||
bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.size+et.ptrdata)
|
||||
bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
|
||||
}
|
||||
}
|
||||
memmove(p, oldPtr, lenmem)
|
||||
|
|
@ -293,9 +293,9 @@ func reflect_growslice(et *_type, old slice, num int) slice {
|
|||
// the memory will be overwritten by an append() that called growslice.
|
||||
// Since the caller of reflect_growslice is not append(),
|
||||
// zero out this region before returning the slice to the reflect package.
|
||||
if et.ptrdata == 0 {
|
||||
oldcapmem := uintptr(old.cap) * et.size
|
||||
newlenmem := uintptr(new.len) * et.size
|
||||
if et.PtrBytes == 0 {
|
||||
oldcapmem := uintptr(old.cap) * et.Size_
|
||||
newlenmem := uintptr(new.len) * et.Size_
|
||||
memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
|
||||
}
|
||||
new.len = old.len // preserve the old length
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue