// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)
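
// mapaccess1_faststr is the string-key specialization of map lookup. It
// returns a pointer to the element stored for ky, or a pointer to the
// shared zero value if the key is not present; it never returns nil.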
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
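		// keymaybe remembers the single slot whose key matched on length and
		// on the first and last four bytes. bucketCnt doubles as the "no
		// candidate yet" sentinel, since valid slot indexes are 0..bucketCnt-1.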
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
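// dohash is reached either by falling through when the map has more than one
// bucket, or via the goto above when two long keys in the one-bucket case
// both pass the cheap checks and the hash is needed to tell them apart.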
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
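
// mapaccess2_faststr is like mapaccess1_faststr, but it also reports whether
// the key was present, backing the two-result (comma-ok) form of a
// string-keyed lookup.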
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
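
// mapassign_faststr returns a pointer to the element slot for key s in h,
// inserting a new slot first if s is not already present. The caller stores
// the element value through the returned pointer. Assigning to a nil map
// panics.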
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)
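
	// insertb/inserti track the first empty slot seen while scanning the
	// bucket chain; if the key turns out not to be present, the new entry
	// is stored there.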
	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
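
// mapdelete_faststr removes the entry for key ky from h, if present.
// Deleting from a nil or empty map is a no-op.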
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
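	// bOrig remembers the first bucket in the chain so the emptyRest
	// back-propagation below can rescan the chain from its start.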
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
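
// growWork_faststr performs incremental evacuation for a growing map: it
// evacuates the old bucket that the current operation needs, plus at most
// one more, so growth makes steady progress.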
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}
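
// evacuate_faststr moves the entries of old bucket oldbucket into the new
// bucket array, splitting them between the low (x) and high (y) destinations
// when the table is doubling in size.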
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
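	// newbit is the number of old buckets; during a size-doubling grow the
	// bit hash&newbit decides whether an entry stays in the low (x) half or
	// moves to the high (y) half of the new table.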
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*goarch.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}