runtime: use MapMaxKeyBytes,MapMaxElemBytes,MapBucketCount of internal/abi
For #59670
Change-Id: I9265e033bf3a84c3dc7b4a5d52c0df9672435f0d
GitHub-Last-Rev: 8e4099095c
GitHub-Pull-Request: golang/go#64774
Reviewed-on: https://go-review.googlesource.com/c/go/+/550117
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
parent 24070cf747
commit fdb200834f
7 changed files with 137 additions and 140 deletions
src/runtime/map.go
@@ -64,20 +64,12 @@ import (
 const (
-	// Maximum number of key/elem pairs a bucket can hold.
-	bucketCntBits = abi.MapBucketCountBits
-	bucketCnt     = abi.MapBucketCount
-
 	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
 	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
 	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
 	loadFactorDen = 2
-	loadFactorNum = loadFactorDen * bucketCnt * 13 / 16
-
-	// Maximum key or elem size to keep inline (instead of mallocing per element).
-	// Must fit in a uint8.
-	// Fast versions cannot handle big elems - the cutoff size for
-	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
-	maxKeySize  = abi.MapMaxKeyBytes
-	maxElemSize = abi.MapMaxElemBytes
+	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16

 	// data offset should be the size of the bmap struct, but needs to be
 	// aligned correctly. For amd64p32 this means 64-bit alignment
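The load-factor constants above are easiest to read with concrete numbers. abi.MapBucketCount is 8 on current platforms, so loadFactorNum = 2*8*13/16 = 13, and the growth threshold loadFactorNum/loadFactorDen works out to 6.5 entries per bucket, i.e. 13/16 (about 80%) of the 8 slots. A minimal sketch of that arithmetic, assuming that value of the abi constant:

    package main

    import "fmt"

    // Mirrors the constants in the hunk above, assuming
    // abi.MapBucketCount == 8 (its value on current platforms).
    const (
        bucketCount   = 8
        loadFactorDen = 2
        loadFactorNum = loadFactorDen * bucketCount * 13 / 16 // 2*8*13/16 = 13
    )

    func main() {
        // Growth triggers once the average load exceeds 13/2 = 6.5 entries per bucket.
        fmt.Println(loadFactorNum, float64(loadFactorNum)/loadFactorDen) // 13 6.5
    }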
@@ -152,7 +144,7 @@ type bmap struct {
 	// tophash generally contains the top byte of the hash value
 	// for each key in this bucket. If tophash[0] < minTopHash,
 	// tophash[0] is a bucket evacuation state instead.
-	tophash [bucketCnt]uint8
+	tophash [abi.MapBucketCount]uint8
 	// Followed by bucketCnt keys and then bucketCnt elems.
 	// NOTE: packing all the keys together and then all the elems together makes the
 	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
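That packed layout is what the recurring expression dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize) in the hunks below computes: key i sits at dataOffset+i*KeySize, and because all keys precede all elems, elem i starts only after the full key array. A sketch of the two address calculations (keyAddr and elemAddr are hypothetical helpers for illustration, not runtime functions; map.go's add does the same pointer arithmetic):

    package bucketlayout

    import "unsafe"

    // Illustrative only: locating slot i in a bucket laid out as
    // [tophash array | bucketCount keys | bucketCount elems | overflow pointer].
    func keyAddr(b unsafe.Pointer, dataOffset, keySize, i uintptr) unsafe.Pointer {
        return unsafe.Add(b, dataOffset+i*keySize)
    }

    func elemAddr(b unsafe.Pointer, dataOffset, keySize, elemSize, bucketCount, i uintptr) unsafe.Pointer {
        // All bucketCount keys come first, so the elems begin after them.
        return unsafe.Add(b, dataOffset+bucketCount*keySize+i*elemSize)
    }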
@@ -431,7 +423,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	top := tophash(hash)
 bucketloop:
 	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < bucketCnt; i++ {
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
 			if b.tophash[i] != top {
 				if b.tophash[i] == emptyRest {
 					break bucketloop
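For context on the scan above (not part of this diff): tophash(hash) takes the hash's top byte and bumps it above minTopHash, so the small values stay reserved for sentinel states such as emptyRest, which marks a slot with nothing stored at or after it and lets the loop abandon the rest of the overflow chain early. Roughly, and assuming minTopHash == 5 and 64-bit pointers, the helper in the same file is equivalent to:

    // Sketch of the tophash convention; not the runtime's literal code.
    const minTopHash = 5

    func tophash(hash uintptr) uint8 {
        top := uint8(hash >> 56) // top byte of the hash
        if top < minTopHash {
            top += minTopHash // values below minTopHash are reserved for cell states
        }
        return top
    }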
@@ -443,7 +435,7 @@ bucketloop:
 				k = *((*unsafe.Pointer)(k))
 			}
 			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 				if t.IndirectElem() {
 					e = *((*unsafe.Pointer)(e))
 				}
@@ -492,7 +484,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 	top := tophash(hash)
 bucketloop:
 	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < bucketCnt; i++ {
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
 			if b.tophash[i] != top {
 				if b.tophash[i] == emptyRest {
 					break bucketloop
@@ -504,7 +496,7 @@ bucketloop:
 				k = *((*unsafe.Pointer)(k))
 			}
 			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 				if t.IndirectElem() {
 					e = *((*unsafe.Pointer)(e))
 				}
@@ -536,7 +528,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
 	top := tophash(hash)
 bucketloop:
 	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < bucketCnt; i++ {
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
 			if b.tophash[i] != top {
 				if b.tophash[i] == emptyRest {
 					break bucketloop
@@ -548,7 +540,7 @@ bucketloop:
 				k = *((*unsafe.Pointer)(k))
 			}
 			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 				if t.IndirectElem() {
 					e = *((*unsafe.Pointer)(e))
 				}
@@ -618,12 +610,12 @@ again:
 	var elem unsafe.Pointer
 bucketloop:
 	for {
-		for i := uintptr(0); i < bucketCnt; i++ {
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
 			if b.tophash[i] != top {
 				if isEmpty(b.tophash[i]) && inserti == nil {
 					inserti = &b.tophash[i]
 					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+					elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 				}
 				if b.tophash[i] == emptyRest {
 					break bucketloop
@@ -641,7 +633,7 @@ bucketloop:
 		if t.NeedKeyUpdate() {
 			typedmemmove(t.Key, k, key)
 		}
-		elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+		elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 		goto done
 	}
 	ovf := b.overflow(t)
@@ -665,7 +657,7 @@ bucketloop:
 		newb := h.newoverflow(t, b)
 		inserti = &newb.tophash[0]
 		insertk = add(unsafe.Pointer(newb), dataOffset)
-		elem = add(insertk, bucketCnt*uintptr(t.KeySize))
+		elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
 	}

 	// store new key/elem at insert position
@@ -731,7 +723,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 	top := tophash(hash)
 search:
 	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < bucketCnt; i++ {
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
 			if b.tophash[i] != top {
 				if b.tophash[i] == emptyRest {
 					break search
@@ -752,7 +744,7 @@ search:
 			} else if t.Key.PtrBytes != 0 {
 				memclrHasPointers(k, t.Key.Size_)
 			}
-			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 			if t.IndirectElem() {
 				*(*unsafe.Pointer)(e) = nil
 			} else if t.Elem.PtrBytes != 0 {
@@ -765,7 +757,7 @@ search:
 			// change those to emptyRest states.
 			// It would be nice to make this a separate function, but
 			// for loops are not currently inlineable.
-			if i == bucketCnt-1 {
+			if i == abi.MapBucketCount-1 {
 				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
 					goto notLast
 				}
@@ -784,7 +776,7 @@ search:
 					c := b
 					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
 					}
-					i = bucketCnt - 1
+					i = abi.MapBucketCount - 1
 				} else {
 					i--
 				}
@@ -845,7 +837,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	// decide where to start
 	r := uintptr(rand())
 	it.startBucket = r & bucketMask(h.B)
-	it.offset = uint8(r >> h.B & (bucketCnt - 1))
+	it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))

 	// iterator state
 	it.bucket = it.startBucket
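mapiterinit draws a single random value and splits it two ways: the low B bits pick the bucket where iteration starts, and the bits just above them pick the slot offset applied inside every bucket. Masking with abi.MapBucketCount-1 is valid only because the bucket size is a power of two. A small sketch of the split, assuming B == 5 and a bucket size of 8 (the value of r is an arbitrary stand-in):

    package main

    import "fmt"

    func main() {
        r := uintptr(0x7f4a7c15)      // stand-in for the runtime's rand()
        const B = 5                   // the map currently has 1<<5 = 32 buckets
        startBucket := r & (1<<B - 1) // bucketMask(B): low B bits
        offset := uint8(r >> B & 7)   // slot offset within each bucket; 7 == bucketCount-1
        fmt.Println(startBucket, offset)
    }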
@@ -906,8 +898,8 @@ next:
 		}
 		i = 0
 	}
-	for ; i < bucketCnt; i++ {
-		offi := (i + it.offset) & (bucketCnt - 1)
+	for ; i < abi.MapBucketCount; i++ {
+		offi := (i + it.offset) & (abi.MapBucketCount - 1)
 		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
 			// TODO: emptyRest is hard to use here, as we start iterating
 			// in the middle of a bucket. It's feasible, just tricky.
@@ -917,7 +909,7 @@ next:
 		if t.IndirectKey() {
 			k = *((*unsafe.Pointer)(k))
 		}
-		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
+		e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
 		if checkBucket != noCheck && !h.sameSizeGrow() {
 			// Special case: iterator was started during a grow to a larger size
 			// and the grow is not done yet. We're working on a bucket whose
@@ -1008,7 +1000,7 @@ func mapclear(t *maptype, h *hmap) {
 		for i := uintptr(0); i <= mask; i++ {
 			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
 			for ; b != nil; b = b.overflow(t) {
-				for i := uintptr(0); i < bucketCnt; i++ {
+				for i := uintptr(0); i < abi.MapBucketCount; i++ {
 					b.tophash[i] = emptyRest
 				}
 			}
@@ -1095,7 +1087,7 @@ func hashGrow(t *maptype, h *hmap) {

 // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
 func overLoadFactor(count int, B uint8) bool {
-	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
 }

 // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
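Spelled out, the predicate is unchanged by this CL: grow when the map holds more than loadFactorNum/loadFactorDen = 6.5 entries per bucket, with bucketShift(B)/loadFactorDen divided first so the product stays within uintptr range on large maps. A worked check under the same assumption (bucket size 8, so loadFactorNum == 13), as a standalone sketch rather than the runtime's own code:

    package maplike

    // Hypothetical standalone version of the check, constants inlined
    // under the assumption abi.MapBucketCount == 8.
    func overLoadFactor(count int, B uint8) bool {
        const bucketCount, loadFactorNum, loadFactorDen = 8, 13, 2
        bucketShift := uintptr(1) << B
        return count > bucketCount && uintptr(count) > loadFactorNum*(bucketShift/loadFactorDen)
    }

With these numbers, overLoadFactor(208, 5) is false while overLoadFactor(209, 5) is true: 32 buckets may hold up to 13*(32/2) = 208 entries, exactly 6.5 per bucket, before the map grows.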
@@ -1173,7 +1165,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 	x := &xy[0]
 	x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
 	x.k = add(unsafe.Pointer(x.b), dataOffset)
-	x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
+	x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))

 	if !h.sameSizeGrow() {
 		// Only calculate y pointers if we're growing bigger.
@@ -1181,13 +1173,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 		y := &xy[1]
 		y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
 		y.k = add(unsafe.Pointer(y.b), dataOffset)
-		y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
+		y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
 	}

 	for ; b != nil; b = b.overflow(t) {
 		k := add(unsafe.Pointer(b), dataOffset)
-		e := add(k, bucketCnt*uintptr(t.KeySize))
-		for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
+		e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
+		for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
 			top := b.tophash[i]
 			if isEmpty(top) {
 				b.tophash[i] = evacuatedEmpty
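The x/y pair above reflects how a doubling grow splits each old bucket: an entry either stays at the same index in the new bucket array (x) or moves to that index plus the old bucket count (y, at oldbucket+newbit), according to the hash bit the larger bucket mask newly exposes (keys without reproducible hashes, such as NaN, get special handling not shown here). A minimal sketch of that routing decision, with illustrative values rather than the runtime's state:

    package main

    import "fmt"

    func main() {
        newbit := uintptr(16)       // old bucket count; the map is doubling to 32
        hash := uintptr(0xdeadbeef) // stand-in for a key's hash
        oldBucket := hash & (newbit - 1)
        useY := hash&newbit != 0 // the newly significant hash bit
        newBucket := oldBucket
        if useY {
            newBucket += newbit // y side: same slot pattern, upper half of the new array
        }
        fmt.Println(oldBucket, useY, newBucket)
    }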
@@ -1233,13 +1225,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 			b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
 			dst := &xy[useY]                 // evacuation destination

-			if dst.i == bucketCnt {
+			if dst.i == abi.MapBucketCount {
 				dst.b = h.newoverflow(t, dst.b)
 				dst.i = 0
 				dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-				dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
+				dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
 			}
-			dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+			dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 			if t.IndirectKey() {
 				*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
 			} else {
@@ -1307,18 +1299,18 @@ func reflect_makemap(t *maptype, cap int) *hmap {
 	if t.Key.Equal == nil {
 		throw("runtime.reflect_makemap: unsupported map key type")
 	}
-	if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
-		t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
+	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
+		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
 		throw("key size wrong")
 	}
-	if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
-		t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
+	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
+		t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
 		throw("elem size wrong")
 	}
-	if t.Key.Align_ > bucketCnt {
+	if t.Key.Align_ > abi.MapBucketCount {
 		throw("key align too big")
 	}
-	if t.Elem.Align_ > bucketCnt {
+	if t.Elem.Align_ > abi.MapBucketCount {
 		throw("elem align too big")
 	}
 	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
@@ -1327,7 +1319,7 @@ func reflect_makemap(t *maptype, cap int) *hmap {
 	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
 		throw("elem size not a multiple of elem align")
 	}
-	if bucketCnt < 8 {
+	if abi.MapBucketCount < 8 {
 		throw("bucketsize too small for proper alignment")
 	}
 	if dataOffset%uintptr(t.Key.Align_) != 0 {
@@ -1452,26 +1444,26 @@ func mapclone(m any) any {
 // moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
 // and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.
 func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
-	for i := 0; i < bucketCnt; i++ {
+	for i := 0; i < abi.MapBucketCount; i++ {
 		if isEmpty(src.tophash[i]) {
 			continue
 		}

-		for ; pos < bucketCnt; pos++ {
+		for ; pos < abi.MapBucketCount; pos++ {
 			if isEmpty(dst.tophash[pos]) {
 				break
 			}
 		}

-		if pos == bucketCnt {
+		if pos == abi.MapBucketCount {
 			dst = h.newoverflow(t, dst)
 			pos = 0
 		}

 		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
-		srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
+		srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
 		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
-		dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
+		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

 		dst.tophash[pos] = src.tophash[i]
 		if t.IndirectKey() {
@@ -1575,7 +1567,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
 		// Process entries one at a time.
 		for srcBmap != nil {
 			// move from oldBlucket to new bucket
-			for i := uintptr(0); i < bucketCnt; i++ {
+			for i := uintptr(0); i < abi.MapBucketCount; i++ {
 				if isEmpty(srcBmap.tophash[i]) {
 					continue
 				}
@@ -1589,7 +1581,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
 				srcK = *((*unsafe.Pointer)(srcK))
 			}

-			srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+			srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
 			if t.IndirectElem() {
 				srcEle = *((*unsafe.Pointer)(srcEle))
 			}
@@ -1615,7 +1607,7 @@ func keys(m any, p unsafe.Pointer) {
 	}
 	s := (*slice)(p)
 	r := int(rand())
-	offset := uint8(r >> h.B & (bucketCnt - 1))
+	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
 	if h.B == 0 {
 		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
 		return
@@ -1644,8 +1636,8 @@ func keys(m any, p unsafe.Pointer) {

 func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
 	for b != nil {
-		for i := uintptr(0); i < bucketCnt; i++ {
-			offi := (i + uintptr(offset)) & (bucketCnt - 1)
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
+			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
 			if isEmpty(b.tophash[offi]) {
 				continue
 			}
@@ -1678,7 +1670,7 @@ func values(m any, p unsafe.Pointer) {
 	}
 	s := (*slice)(p)
 	r := int(rand())
-	offset := uint8(r >> h.B & (bucketCnt - 1))
+	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
 	if h.B == 0 {
 		copyValues(t, h, (*bmap)(h.buckets), s, offset)
 		return
@@ -1707,8 +1699,8 @@ func values(m any, p unsafe.Pointer) {

 func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
 	for b != nil {
-		for i := uintptr(0); i < bucketCnt; i++ {
-			offi := (i + uintptr(offset)) & (bucketCnt - 1)
+		for i := uintptr(0); i < abi.MapBucketCount; i++ {
+			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
 			if isEmpty(b.tophash[offi]) {
 				continue
 			}
@@ -1717,7 +1709,7 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
 			fatal("concurrent map read and map write")
 		}

-		ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
+		ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
 		if t.IndirectElem() {
 			ele = *((*unsafe.Pointer)(ele))
 		}