The spec carefully and consistently uses "key" and "element" as map terminology. The implementation, not so much.

This change attempts to make the implementation consistently hew to the spec's terminology. Beyond consistency, this has the advantage of avoiding some confusion and naming collisions, since v and value are very generic and commonly used terms.

I believe that I found everything, but there are a lot of non-obvious places for these to hide, and grepping for them is hard. Hopefully this change converts enough of them that we will start using elem going forward. Any remaining hidden cases can be fixed up ad hoc as they are discovered.

The only externally-facing part of this change is in package reflect, where there is a minor doc change and a function parameter name change.

Updates #27167

Change-Id: I2f2d78f16c360dc39007b9966d5c2046a29d3701
Reviewed-on: https://go-review.googlesource.com/c/go/+/174523
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
476 lines · 14 KiB · Go
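For context: the compiler lowers string-keyed map operations to the fast-path routines defined in this file. A minimal sketch of ordinary user code (nothing in this file is called directly):

	package main

	func main() {
		m := map[string]int{"a": 1}
		v := m["a"]      // lowered to mapaccess1_faststr
		v, ok := m["a"]  // lowered to mapaccess2_faststr
		m["b"] = v       // lowered to mapassign_faststr
		delete(m, "a")   // lowered to mapdelete_faststr
		_, _ = v, ok
	}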
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

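// mapaccess1_faststr is the string-key specialization of mapaccess1.
// It returns a pointer to the element for key ky, or a pointer to the
// zero object if ky is not present in the map; it never returns nil.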
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
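		// Strategy: skip entries whose length differs, then cheaply
		// compare the first and last 4 bytes (safe: long keys have
		// len >= 32). keymaybe remembers the single surviving
		// candidate, which gets one full memequal after the loop; if
		// a second candidate appears, fall back to the hashed lookup
		// at dohash instead.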
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
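// dohash is the generic hashed lookup, reached when the table has more
// than one bucket (h.B > 0), or via the goto above when two long-key
// entries both survive the cheap prefix/suffix checks.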
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

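// mapaccess2_faststr is like mapaccess1_faststr, but also returns a
// boolean reporting whether the key was present, backing expressions
// of the form v, ok := m[k] on a map[string]T.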
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

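// mapassign_faststr is the string-key specialization of mapassign,
// backing assignments like m[k] = v on a map[string]T. It returns a
// pointer to the element slot for key s, creating the slot (and
// growing the map if needed) when s is not already present; the
// caller then stores the element through the returned pointer.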
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

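	// Scan the bucket and its overflow chain: if the key is found,
	// jump to done and return its existing slot; otherwise remember
	// the first empty slot seen (insertb/inserti) and stop early at an
	// emptyRest mark.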
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

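// mapdelete_faststr is the string-key specialization of mapdelete,
// backing delete(m, k) on a map[string]T. Deleting a key that is not
// present is a no-op.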
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
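			// Walk backwards (crossing back over overflow-bucket
			// boundaries) converting trailing emptyOne slots to
			// emptyRest, so that later lookups can stop scanning
			// early.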
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

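// growWork_faststr performs a bounded amount of incremental grow work:
// it evacuates the old bucket corresponding to the bucket about to be
// used, plus at most one more to make progress on an in-progress grow.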
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

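// evacuate_faststr copies the entries of old bucket oldbucket (and its
// overflow chain) into the new bucket array. When the map is doubling,
// each entry is rehashed to destination x (same bucket index) or y
// (index plus newbit); a same-size grow keeps everything at x.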
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}