runtime: move per-type types to internal/abi

Change-Id: I1f031f0f83a94bebe41d3978a91a903dc5bcda66
Reviewed-on: https://go-review.googlesource.com/c/go/+/489276
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
David Chase 2023-04-25 19:14:05 -04:00
parent a2838ec5f2
commit 2e93fe0a9f
20 changed files with 392 additions and 523 deletions


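The renames in the hunks below are mechanical: the runtime's maptype is now the exported descriptor from internal/abi, so unexported fields and accessors (t.bucketsize, t.hasher, t.indirectkey(), t.key, t.elem, ...) become exported ones (t.BucketSize, t.Hasher, t.IndirectKey(), t.Key, t.Elem, ...), with elemsize becoming ValueSize. As a reading aid, here is a compilable sketch of the descriptor's shape as implied by the accesses in this diff; the authoritative declaration lives in internal/abi, and the elided fields, field order, and flag bit values below are assumptions.

package abisketch // illustrative only; the real declaration is internal/abi.MapType

import "unsafe"

// Type is a cut-down stand-in for internal/abi.Type, keeping only the
// fields this diff touches (Size_, PtrBytes, Align_, Equal).
type Type struct {
    Size_    uintptr
    PtrBytes uintptr // number of prefix bytes in the type that can contain pointers
    Align_   uint8
    Equal    func(unsafe.Pointer, unsafe.Pointer) bool
    // other fields elided
}

// MapType mirrors the exported fields used in this file after the rename.
// (The real type also embeds Type; omitted here.)
type MapType struct {
    Key        *Type
    Elem       *Type
    Bucket     *Type // internal type representing a hash bucket
    Hasher     func(unsafe.Pointer, uintptr) uintptr
    KeySize    uint8  // size of key slot
    ValueSize  uint8  // size of elem slot (the old maptype.elemsize)
    BucketSize uint16 // size of bucket (the old maptype.bucketsize)
    Flags      uint32
}

// Exported flag accessors replacing the old lowercase helpers; the bit
// assignments are an assumption for illustration.
func (mt *MapType) IndirectKey() bool    { return mt.Flags&1 != 0 }  // store ptr to key instead of key itself
func (mt *MapType) IndirectElem() bool   { return mt.Flags&2 != 0 }  // store ptr to elem instead of elem itself
func (mt *MapType) ReflexiveKey() bool   { return mt.Flags&4 != 0 }  // k==k for all keys
func (mt *MapType) NeedKeyUpdate() bool  { return mt.Flags&8 != 0 }  // need to update key on an overwrite
func (mt *MapType) HashMightPanic() bool { return mt.Flags&16 != 0 } // hash function might panic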
@@ -207,11 +207,11 @@ func evacuated(b *bmap) bool {
}
func (b *bmap) overflow(t *maptype) *bmap {
return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}
func (b *bmap) keys() unsafe.Pointer {
@@ -252,7 +252,7 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
ovf = h.extra.nextOverflow
if ovf.overflow(t) == nil {
// We're not at the end of the preallocated overflow buckets. Bump the pointer.
h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
} else {
// This is the last preallocated overflow bucket.
// Reset the overflow pointer on this bucket,
@@ -261,10 +261,10 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
h.extra.nextOverflow = nil
}
} else {
ovf = (*bmap)(newobject(t.bucket))
ovf = (*bmap)(newobject(t.Bucket))
}
h.incrnoverflow()
if t.bucket.PtrBytes == 0 {
if t.Bucket.PtrBytes == 0 {
h.createOverflow()
*h.extra.overflow = append(*h.extra.overflow, ovf)
}
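The two branches above implement a bump-pointer convention over the overflow buckets preallocated by makeBucketArray (shown further down): a preallocated bucket whose own overflow pointer is still nil means more preallocated buckets follow contiguously, and the last one carries a non-nil sentinel. A self-contained sketch of that convention with a toy bucket type (not the runtime's bmap, and no unsafe pointer arithmetic):

package main

import "fmt"

// bucket is a toy stand-in for the runtime's bmap; only the overflow link
// and an id for printing matter here.
type bucket struct {
    id       int
    overflow *bucket
}

// extra models h.extra.nextOverflow: the next unused preallocated overflow bucket.
type extra struct {
    nextOverflow *bucket
}

var preallocated [4]bucket

// takeOverflow mirrors the branches in newoverflow: hand out preallocated
// buckets by bumping a pointer until the sentinel-marked last one, then fall
// back to fresh allocation (newobject(t.Bucket) in the real code).
func (e *extra) takeOverflow() *bucket {
    if ovf := e.nextOverflow; ovf != nil {
        if ovf.overflow == nil {
            // Not at the end of the preallocated run; bump to the next bucket.
            e.nextOverflow = &preallocated[ovf.id+1]
        } else {
            // Last preallocated bucket: clear its sentinel overflow pointer
            // and stop handing out preallocated buckets.
            ovf.overflow = nil
            e.nextOverflow = nil
        }
        return ovf
    }
    return &bucket{id: -1} // freshly allocated overflow bucket
}

func main() {
    for i := range preallocated {
        preallocated[i].id = i
    }
    // makeBucketArray marks the last preallocated bucket with a non-nil
    // overflow pointer (last.setoverflow(t, (*bmap)(buckets)) below).
    preallocated[len(preallocated)-1].overflow = &preallocated[0]

    e := &extra{nextOverflow: &preallocated[0]}
    for i := 0; i < 6; i++ {
        fmt.Println("got bucket:", e.takeOverflow().id) // 0, 1, 2, 3, then -1 twice
    }
}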
@@ -303,7 +303,7 @@ func makemap_small() *hmap {
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.Size_)
mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
if overflow || mem > maxAlloc {
hint = 0
}
@@ -353,22 +353,22 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// required to insert the median number of elements
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.bucket.Size_ * nbuckets
sz := t.Bucket.Size_ * nbuckets
up := roundupsize(sz)
if up != sz {
nbuckets = up / t.bucket.Size_
nbuckets = up / t.Bucket.Size_
}
}
if dirtyalloc == nil {
buckets = newarray(t.bucket, int(nbuckets))
buckets = newarray(t.Bucket, int(nbuckets))
} else {
// dirtyalloc was previously generated by
// the above newarray(t.bucket, int(nbuckets))
// the above newarray(t.Bucket, int(nbuckets))
// but may not be empty.
buckets = dirtyalloc
size := t.bucket.Size_ * nbuckets
if t.bucket.PtrBytes != 0 {
size := t.Bucket.Size_ * nbuckets
if t.Bucket.PtrBytes != 0 {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
@@ -381,8 +381,8 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// we use the convention that if a preallocated overflow bucket's overflow
// pointer is nil, then there are more available by bumping the pointer.
// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
last.setoverflow(t, (*bmap)(buckets))
}
return buckets, nextOverflow
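The nbuckets recomputation above exists because the allocator rounds requests up to a size class; if the rounded-up allocation has room for extra whole buckets, the map takes them as preallocated overflow buckets. A sketch with a made-up roundupsize and size-class table (the real table lives in the runtime's sizeclasses.go; the numbers below are only for illustration):

package main

import "fmt"

// sizeClasses is a small, made-up excerpt standing in for the runtime's
// allocation size-class table; the exact values are illustrative only.
var sizeClasses = []uintptr{16, 32, 48, 64, 80, 96, 112, 128, 256, 512, 768, 896, 1024, 2048}

// roundupsize stands in for the runtime helper of the same name: the size
// the allocator will actually hand back for a request of sz bytes.
func roundupsize(sz uintptr) uintptr {
    for _, c := range sizeClasses {
        if c >= sz {
            return c
        }
    }
    return sz
}

func main() {
    // e.g. a map[int32]int32 bucket on 64-bit: 8 tophash bytes + 8*4 key
    // bytes + 8*4 elem bytes + an 8-byte overflow pointer.
    const bucketSize = 80
    nbuckets := uintptr(10)

    sz := bucketSize * nbuckets // 800 bytes requested
    up := roundupsize(sz)       // 896 with the toy table above
    if up != sz {
        // The allocation is that big anyway, so turn the slack into
        // extra preallocated overflow buckets.
        nbuckets = up / bucketSize
    }
    fmt.Println(sz, up, nbuckets) // 800 896 11
}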
@@ -398,32 +398,32 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.Size_)
msanread(key, t.Key.Size_)
}
if asanenabled && h != nil {
asanread(key, t.key.Size_)
asanread(key, t.Key.Size_)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
if t.HashMightPanic() {
t.Hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
hash := t.hasher(key, uintptr(h.hash0))
hash := t.Hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
@@ -438,13 +438,13 @@ bucketloop:
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
if t.key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
return e
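The bucket selection above is plain masking: with 2^B buckets, bucketMask(h.B) is 2^B-1, and while a grow is in progress the same hash is re-masked with one fewer bit ("mask down one more power of two") to locate the possibly not-yet-evacuated old bucket. A standalone sketch of the arithmetic, with no runtime types:

package main

import "fmt"

// bucketMask(B) is 2^B - 1, the mask that turns a hash into an index into a
// table of 2^B buckets.
func bucketMask(B uint8) uintptr { return uintptr(1)<<B - 1 }

func main() {
    const B = 5             // 2^5 = 32 buckets once the grow finishes
    hash := uintptr(0xcafe) // pretend t.Hasher(key, uintptr(h.hash0)) returned this

    m := bucketMask(B)
    newBucket := hash & m // index into h.buckets

    // While growing from B-1 to B, the key may still sit in h.oldbuckets,
    // which has half as many buckets: mask down one more power of two.
    oldBucket := hash & (m >> 1)

    fmt.Printf("hash=%#x new bucket=%d old bucket=%d\n", hash, newBucket, oldBucket)
    // The old index is the new index with its top bit cleared:
    fmt.Println(oldBucket == newBucket&^(1<<(B-1))) // true
}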
@@ -459,32 +459,32 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.Size_)
msanread(key, t.Key.Size_)
}
if asanenabled && h != nil {
asanread(key, t.key.Size_)
asanread(key, t.Key.Size_)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
if t.HashMightPanic() {
t.Hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
hash := t.hasher(key, uintptr(h.hash0))
hash := t.Hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
@@ -499,13 +499,13 @@ bucketloop:
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
if t.key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
return e, true
@@ -520,15 +520,15 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 {
return nil, nil
}
hash := t.hasher(key, uintptr(h.hash0))
hash := t.Hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
@@ -543,13 +543,13 @@ bucketloop:
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
if t.key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
return k, e
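All three access paths compute key and elem addresses with the same offsets: dataOffset + i*KeySize for the i'th key and dataOffset + bucketCnt*KeySize + i*ValueSize for the i'th elem, reflecting a bucket layout of tophash bytes, then all keys, then all elems, then the overflow pointer. A sketch of those offset formulas; dataOffset = 8 is an assumption that the tophash array is not padded:

package main

import "fmt"

// Offsets of the i'th key and elem slot inside a bucket, as computed
// throughout this file.
const (
    bucketCnt  = 8
    dataOffset = 8 // offset of the key area, just past the [8]uint8 tophash array (assumed unpadded)
)

func keyOffset(i, keySize uintptr) uintptr {
    return dataOffset + i*keySize
}

func elemOffset(i, keySize, elemSize uintptr) uintptr {
    return dataOffset + bucketCnt*keySize + i*elemSize
}

func main() {
    // e.g. a map[string]int on 64-bit: 16-byte keys, 8-byte elems.
    const keySize, elemSize = 16, 8
    for i := uintptr(0); i < bucketCnt; i++ {
        fmt.Printf("slot %d: key at +%d, elem at +%d\n",
            i, keyOffset(i, keySize), elemOffset(i, keySize, elemSize))
    }
    // Packing all keys together and then all elems (rather than key/elem
    // pairs) avoids the padding that interleaving, say, int64 keys with
    // int8 elems would require.
}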
@@ -584,25 +584,25 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
if msanenabled {
msanread(key, t.key.Size_)
msanread(key, t.Key.Size_)
}
if asanenabled {
asanread(key, t.key.Size_)
asanread(key, t.Key.Size_)
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.hasher(key, uintptr(h.hash0))
hash := t.Hasher(key, uintptr(h.hash0))
// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
}
again:
@@ -610,7 +610,7 @@ again:
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
top := tophash(hash)
var inserti *uint8
@@ -622,26 +622,26 @@ bucketloop:
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
if !t.key.Equal(key, k) {
if !t.Key.Equal(key, k) {
continue
}
// already have a mapping for key. Update it.
if t.needkeyupdate() {
typedmemmove(t.key, k, key)
if t.NeedKeyUpdate() {
typedmemmove(t.Key, k, key)
}
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
goto done
}
ovf := b.overflow(t)
@@ -665,20 +665,20 @@ bucketloop:
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
elem = add(insertk, bucketCnt*uintptr(t.keysize))
elem = add(insertk, bucketCnt*uintptr(t.KeySize))
}
// store new key/elem at insert position
if t.indirectkey() {
kmem := newobject(t.key)
if t.IndirectKey() {
kmem := newobject(t.Key)
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
if t.indirectelem() {
vmem := newobject(t.elem)
if t.IndirectElem() {
vmem := newobject(t.Elem)
*(*unsafe.Pointer)(elem) = vmem
}
typedmemmove(t.key, insertk, key)
typedmemmove(t.Key, insertk, key)
*inserti = top
h.count++
@@ -687,7 +687,7 @@ done:
fatal("concurrent map writes")
}
h.flags &^= hashWriting
if t.indirectelem() {
if t.IndirectElem() {
elem = *((*unsafe.Pointer)(elem))
}
return elem
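mapassign's use of IndirectKey/IndirectElem above shows the indirect representation: when a key or elem type is too large to store inline, the bucket slot holds only a pointer (allocated with newobject(t.Key)/newobject(t.Elem)) and KeySize/ValueSize shrink to the pointer size. A toy sketch of writing and reading through that indirection; the maxKeySize/maxElemSize names mirror the runtime's constants, but their values here are an assumption:

package main

import (
    "fmt"
    "unsafe"
)

// Thresholds beyond which a key or elem is stored indirectly; values assumed.
const (
    maxKeySize  = 128
    maxElemSize = 128
)

type bigKey [256]byte // larger than maxKeySize, so it would be stored indirectly

func main() {
    fmt.Println("stored indirectly:", unsafe.Sizeof(bigKey{}) > maxKeySize) // true

    k := bigKey{1, 2, 3}

    // The bucket's key slot holds a pointer to the real key (what
    // "kmem := newobject(t.Key); *(*unsafe.Pointer)(insertk) = kmem" sets up).
    slotContents := unsafe.Pointer(&k)
    slotAddr := unsafe.Pointer(&slotContents)

    // On lookup, k starts out as the slot address; with IndirectKey() it is
    // dereferenced once more ("k = *((*unsafe.Pointer)(k))" in the diff).
    keyPtr := *(*unsafe.Pointer)(slotAddr)
    fmt.Println((*bigKey)(keyPtr)[0], (*bigKey)(keyPtr)[1]) // 1 2

    _ = maxElemSize // elems use the same scheme with their own threshold
}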
@@ -698,17 +698,17 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.Size_)
msanread(key, t.Key.Size_)
}
if asanenabled && h != nil {
asanread(key, t.key.Size_)
asanread(key, t.Key.Size_)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
if t.HashMightPanic() {
t.Hasher(key, 0) // see issue 23734
}
return
}
@@ -716,7 +716,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
fatal("concurrent map writes")
}
hash := t.hasher(key, uintptr(h.hash0))
hash := t.Hasher(key, uintptr(h.hash0))
// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
@@ -726,7 +726,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
bOrig := b
top := tophash(hash)
search:
@@ -738,27 +738,27 @@ search:
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
k2 := k
if t.indirectkey() {
if t.IndirectKey() {
k2 = *((*unsafe.Pointer)(k2))
}
if !t.key.Equal(key, k2) {
if !t.Key.Equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
if t.indirectkey() {
if t.IndirectKey() {
*(*unsafe.Pointer)(k) = nil
} else if t.key.PtrBytes != 0 {
memclrHasPointers(k, t.key.Size_)
} else if t.Key.PtrBytes != 0 {
memclrHasPointers(k, t.Key.Size_)
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
*(*unsafe.Pointer)(e) = nil
} else if t.elem.PtrBytes != 0 {
memclrHasPointers(e, t.elem.Size_)
} else if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.elem.Size_)
memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
@@ -832,7 +832,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
if t.bucket.PtrBytes == 0 {
if t.Bucket.PtrBytes == 0 {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
@@ -893,15 +893,15 @@ next:
// bucket hasn't been evacuated) then we need to iterate through the old
// bucket and only return the ones that will be migrated to this bucket.
oldbucket := bucket & it.h.oldbucketmask()
b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
if !evacuated(b) {
checkBucket = bucket
} else {
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
checkBucket = noCheck
}
} else {
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
checkBucket = noCheck
}
bucket++
@@ -918,11 +918,11 @@ next:
// in the middle of a bucket. It's feasible, just tricky.
continue
}
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
if t.indirectkey() {
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
if checkBucket != noCheck && !h.sameSizeGrow() {
// Special case: iterator was started during a grow to a larger size
// and the grow is not done yet. We're working on a bucket whose
@@ -931,10 +931,10 @@ next:
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
if t.reflexivekey() || t.key.Equal(k, k) {
if t.ReflexiveKey() || t.Key.Equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := t.hasher(k, uintptr(h.hash0))
hash := t.Hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
@@ -952,13 +952,13 @@ next:
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
!(t.reflexivekey() || t.key.Equal(k, k)) {
!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
// That's lucky for us because when key!=key we can't look it up successfully.
it.key = k
if t.indirectelem() {
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
it.elem = e
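The ReflexiveKey()/Key.Equal(k, k) checks above exist because of non-reflexive keys such as floating-point NaN: k != k means the entry can never be looked up again and its hash is not reproducible across a grow, so the iterator (and evacuate, further down) treat it specially. The observable language-level behavior:

package main

import (
    "fmt"
    "math"
)

// Demonstrates why float NaN keys are the non-reflexive case the runtime
// guards against: NaN never equals itself, including the copy stored in the map.
func main() {
    nan := math.NaN()
    fmt.Println(nan == nan) // false: a non-reflexive key

    m := map[float64]int{}
    m[math.NaN()] = 1
    m[math.NaN()] = 2   // a *different* entry: NaN never matches the stored key
    fmt.Println(len(m)) // 2

    _, ok := m[math.NaN()] // such entries can never be found again
    fmt.Println(ok)        // false
}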
@@ -1011,7 +1011,7 @@ func mapclear(t *maptype, h *hmap) {
// Mark buckets empty, so existing iterators can be terminated, see issue #59411.
markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
for i := uintptr(0); i <= mask; i++ {
b := (*bmap)(add(bucket, i*uintptr(t.bucketsize)))
b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
b.tophash[i] = emptyRest
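Marking every slot emptyRest lets an iterator that is racing with a clear see the buckets as empty and terminate (the issue #59411 fix referenced above). At the language level, where the clear builtin lowers to this mapclear path:

package main

import "fmt"

// An iteration that overlaps clear(m) stops producing entries: everything
// not yet reached has been deleted, so the range loop ends after the
// current element.
func main() {
    m := map[int]int{}
    for i := 0; i < 1000; i++ {
        m[i] = i
    }
    seen := 0
    for range m {
        seen++
        clear(m)
    }
    fmt.Println("iterations after clear:", seen) // 1: no further entries are produced
    fmt.Println(len(m))                          // 0
}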
@@ -1154,7 +1154,7 @@ func growWork(t *maptype, h *hmap, bucket uintptr) {
}
func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
return evacuated(b)
}
@@ -1167,7 +1167,7 @@ type evacDst struct {
}
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
@@ -1176,23 +1176,23 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
// xy contains the x and y (low and high) evacuation destinations.
var xy [2]evacDst
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*uintptr(t.keysize))
x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*uintptr(t.keysize))
y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*uintptr(t.keysize))
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
e := add(k, bucketCnt*uintptr(t.KeySize))
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -1202,15 +1202,15 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
throw("bad map state")
}
k2 := k
if t.indirectkey() {
if t.IndirectKey() {
k2 = *((*unsafe.Pointer)(k2))
}
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.hasher(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.Equal(k2, k2) {
hash := t.Hasher(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
@@ -1242,35 +1242,35 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
if t.indirectkey() {
if t.IndirectKey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
typedmemmove(t.key, dst.k, k) // copy elem
typedmemmove(t.Key, dst.k, k) // copy elem
}
if t.indirectelem() {
if t.IndirectElem() {
*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
} else {
typedmemmove(t.elem, dst.e, e)
typedmemmove(t.Elem, dst.e, e)
}
dst.i++
// These updates might push these pointers past the end of the
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, uintptr(t.keysize))
dst.e = add(dst.e, uintptr(t.elemsize))
dst.k = add(dst.k, uintptr(t.KeySize))
dst.e = add(dst.e, uintptr(t.ValueSize))
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
n := uintptr(t.bucketsize) - dataOffset
n := uintptr(t.BucketSize) - dataOffset
memclrHasPointers(ptr, n)
}
}
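Evacuation splits each old bucket between an "x" destination (same index) and a "y" destination (index + newbit, where newbit is the old bucket count), chosen by the single extra hash bit tested with hash&newbit. A standalone sketch of that arithmetic:

package main

import "fmt"

// When the table doubles from 2^(B-1) to 2^B buckets, every old bucket i is
// split into new buckets i (x) and i+newbit (y), where newbit = 2^(B-1) is
// the number of old buckets. Which destination an entry goes to depends on
// exactly one extra hash bit.
func main() {
    const oldB = 4
    newbit := uintptr(1) << oldB // 16 old buckets, 32 new ones

    for _, hash := range []uintptr{0x12345, 0xabcde, 0x7f00f} {
        oldbucket := hash & (newbit - 1)
        useY := hash&newbit != 0
        newbucket := oldbucket
        if useY {
            newbucket += newbit
        }
        fmt.Printf("hash=%#x old=%d -> new=%d (useY=%v)\n", hash, oldbucket, newbucket, useY)
        // Consistency check: masking with the new, larger mask gives the same index.
        fmt.Println(newbucket == hash&(2*newbit-1)) // true
    }
}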
@@ -1309,36 +1309,36 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
if t.key.Equal == nil {
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.key.Size_ > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
t.key.Size_ <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.Size_)) {
if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
throw("key size wrong")
}
if t.elem.Size_ > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
t.elem.Size_ <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.Size_)) {
if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
throw("elem size wrong")
}
if t.key.Align_ > bucketCnt {
if t.Key.Align_ > bucketCnt {
throw("key align too big")
}
if t.elem.Align_ > bucketCnt {
if t.Elem.Align_ > bucketCnt {
throw("elem align too big")
}
if t.key.Size_%uintptr(t.key.Align_) != 0 {
if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
throw("key size not a multiple of key align")
}
if t.elem.Size_%uintptr(t.elem.Align_) != 0 {
if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
throw("elem size not a multiple of elem align")
}
if bucketCnt < 8 {
throw("bucketsize too small for proper alignment")
}
if dataOffset%uintptr(t.key.Align_) != 0 {
if dataOffset%uintptr(t.Key.Align_) != 0 {
throw("need padding in bucket (key)")
}
if dataOffset%uintptr(t.elem.Align_) != 0 {
if dataOffset%uintptr(t.Elem.Align_) != 0 {
throw("need padding in bucket (elem)")
}
@@ -1368,13 +1368,13 @@ func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, h, key)
typedmemmove(t.elem, p, elem)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
p := mapassign_faststr(t, h, key)
typedmemmove(t.elem, p, elem)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapdelete reflect.mapdelete
@@ -1474,21 +1474,21 @@ func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int)
pos = 0
}
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.keysize))
srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(i)*uintptr(t.elemsize))
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.keysize))
dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(pos)*uintptr(t.elemsize))
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
dst.tophash[pos] = src.tophash[i]
if t.indirectkey() {
if t.IndirectKey() {
*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
} else {
typedmemmove(t.key, dstK, srcK)
typedmemmove(t.Key, dstK, srcK)
}
if t.indirectelem() {
if t.IndirectElem() {
*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
} else {
typedmemmove(t.elem, dstEle, srcEle)
typedmemmove(t.Elem, dstEle, srcEle)
}
pos++
h.count++
@@ -1511,23 +1511,23 @@ func mapclone2(t *maptype, src *hmap) *hmap {
}
if src.B == 0 {
dst.buckets = newobject(t.bucket)
dst.buckets = newobject(t.Bucket)
dst.count = src.count
typedmemmove(t.bucket, dst.buckets, src.buckets)
typedmemmove(t.Bucket, dst.buckets, src.buckets)
return dst
}
//src.B != 0
if dst.B == 0 {
dst.buckets = newobject(t.bucket)
dst.buckets = newobject(t.Bucket)
}
dstArraySize := int(bucketShift(dst.B))
srcArraySize := int(bucketShift(src.B))
for i := 0; i < dstArraySize; i++ {
dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.bucketsize))))
dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
pos := 0
for j := 0; j < srcArraySize; j += dstArraySize {
srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.bucketsize))))
srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
for srcBmap != nil {
dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
srcBmap = srcBmap.overflow(t)
@@ -1547,7 +1547,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
oldSrcArraySize := int(bucketShift(oldB))
for i := 0; i < oldSrcArraySize; i++ {
srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.bucketsize))))
srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
if evacuated(srcBmap) {
continue
}
@@ -1576,17 +1576,17 @@ func mapclone2(t *maptype, src *hmap) *hmap {
fatal("concurrent map clone and map write")
}
srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
if t.IndirectKey() {
srcK = *((*unsafe.Pointer)(srcK))
}
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
srcEle = *((*unsafe.Pointer)(srcEle))
}
dstEle := mapassign(t, dst, srcK)
typedmemmove(t.elem, dstEle, srcEle)
typedmemmove(t.Elem, dstEle, srcEle)
}
srcBmap = srcBmap.overflow(t)
}
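This final fallback in mapclone2 is, at the language level, just re-inserting each surviving key/elem pair into the destination: mapassign finds or creates the slot and typedmemmove(t.Elem, ...) copies the value. maps.Clone is the exported API that reaches this code (via runtime.mapclone); a small sketch of the resulting, shallow-copy semantics:

package main

import (
    "fmt"
    "maps"
)

// maps.Clone copies the key/value pairs, not what they point at: the cloned
// map's structure is independent, but reference values (slices here) alias.
func main() {
    src := map[string][]int{"a": {1, 2}, "b": {3}}
    dst := maps.Clone(src)

    dst["c"] = []int{4}             // structure is independent after the clone...
    dst["a"][0] = 99                // ...but values are shallow-copied, so slices alias.
    fmt.Println(len(src), len(dst)) // 2 3
    fmt.Println(src["a"][0])        // 99
}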