all: remove GOEXPERIMENT=swissmap

For #54766.

Change-Id: I6a6a636c40b5fe2e8b0d4a5e23933492bc8bb76e
Reviewed-on: https://go-review.googlesource.com/c/go/+/691595
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
Authored by Michael Pratt on 2025-07-25 15:35:36 -04:00; committed by Gopher Robot
parent cc571dab91
commit 2ae059ccaf
71 changed files with 762 additions and 6140 deletions

View file

@ -104,12 +104,10 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
ir.Pkgs.Runtime.Prefix = "runtime"
if buildcfg.Experiment.SwissMap {
// Pseudo-package that contains the compiler's builtin
// declarations for maps.
ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
}
// pseudo-packages used in symbol tables
ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")

View file

@ -1,305 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflectdata
import (
"internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/rttype"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// OldMapBucketType makes the map bucket type given the type of the map.
func OldMapBucketType(t *types.Type) *types.Type {
// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with runtime/map.go.
//
// A "bucket" is a "struct" {
// tophash [abi.OldMapBucketCount]uint8
// keys [abi.OldMapBucketCount]keyType
// elems [abi.OldMapBucketCount]elemType
// overflow *bucket
// }
if t.MapType().OldBucket != nil {
return t.MapType().OldBucket
}
keytype := t.Key()
elemtype := t.Elem()
types.CalcSize(keytype)
types.CalcSize(elemtype)
if keytype.Size() > abi.OldMapMaxKeyBytes {
keytype = types.NewPtr(keytype)
}
if elemtype.Size() > abi.OldMapMaxElemBytes {
elemtype = types.NewPtr(elemtype)
}
field := make([]*types.Field, 0, 5)
// The first field is: uint8 topbits[BUCKETSIZE].
arr := types.NewArray(types.Types[types.TUINT8], abi.OldMapBucketCount)
field = append(field, makefield("topbits", arr))
arr = types.NewArray(keytype, abi.OldMapBucketCount)
arr.SetNoalg(true)
keys := makefield("keys", arr)
field = append(field, keys)
arr = types.NewArray(elemtype, abi.OldMapBucketCount)
arr.SetNoalg(true)
elems := makefield("elems", arr)
field = append(field, elems)
// If keys and elems have no pointers, the map implementation
// can keep a list of overflow pointers on the side so that
// buckets can be marked as having no pointers.
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
otyp := types.Types[types.TUNSAFEPTR]
if !elemtype.HasPointers() && !keytype.HasPointers() {
otyp = types.Types[types.TUINTPTR]
}
overflow := makefield("overflow", otyp)
field = append(field, overflow)
// link up fields
bucket := types.NewStruct(field[:])
bucket.SetNoalg(true)
types.CalcSize(bucket)
// Check invariants that map code depends on.
if !types.IsComparable(t.Key()) {
base.Fatalf("unsupported map key type for %v", t)
}
if abi.OldMapBucketCount < 8 {
base.Fatalf("bucket size %d too small for proper alignment %d", abi.OldMapBucketCount, 8)
}
if uint8(keytype.Alignment()) > abi.OldMapBucketCount {
base.Fatalf("key align too big for %v", t)
}
if uint8(elemtype.Alignment()) > abi.OldMapBucketCount {
base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.OldMapBucketCount)
}
if keytype.Size() > abi.OldMapMaxKeyBytes {
base.Fatalf("key size too large for %v", t)
}
if elemtype.Size() > abi.OldMapMaxElemBytes {
base.Fatalf("elem size too large for %v", t)
}
if t.Key().Size() > abi.OldMapMaxKeyBytes && !keytype.IsPtr() {
base.Fatalf("key indirect incorrect for %v", t)
}
if t.Elem().Size() > abi.OldMapMaxElemBytes && !elemtype.IsPtr() {
base.Fatalf("elem indirect incorrect for %v", t)
}
if keytype.Size()%keytype.Alignment() != 0 {
base.Fatalf("key size not a multiple of key align for %v", t)
}
if elemtype.Size()%elemtype.Alignment() != 0 {
base.Fatalf("elem size not a multiple of elem align for %v", t)
}
if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
base.Fatalf("bucket align not multiple of key align %v", t)
}
if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
base.Fatalf("bucket align not multiple of elem align %v", t)
}
if keys.Offset%keytype.Alignment() != 0 {
base.Fatalf("bad alignment of keys in bmap for %v", t)
}
if elems.Offset%elemtype.Alignment() != 0 {
base.Fatalf("bad alignment of elems in bmap for %v", t)
}
// Double-check that overflow field is final memory in struct,
// with no padding at end.
if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
}
t.MapType().OldBucket = bucket
bucket.StructType().Map = t
return bucket
}
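
Aside (not part of the change): the layout that the removed OldMapBucketType describes can be sketched in plain Go for a concrete key/elem pair. The mirror below is illustrative only; it assumes map[int64]int64 and abi.OldMapBucketCount = 8, and also demonstrates the overflow-is-the-final-word invariant checked above.

package main

import (
	"fmt"
	"unsafe"
)

// Illustrative mirror of the removed bucket layout for map[int64]int64,
// following the comment above: tophash, keys, elems, then the overflow
// pointer as the final word of the struct.
type bmapInt64 struct {
	tophash  [8]uint8 // abi.OldMapBucketCount = 8
	keys     [8]int64
	elems    [8]int64
	overflow unsafe.Pointer
}

func main() {
	var b bmapInt64
	fmt.Println("size:", unsafe.Sizeof(b))                 // 144 on 64-bit
	fmt.Println("keys offset:", unsafe.Offsetof(b.keys))   // 8
	fmt.Println("elems offset:", unsafe.Offsetof(b.elems)) // 72
	fmt.Println("overflow offset:", unsafe.Offsetof(b.overflow),
		"== size-PtrSize:", unsafe.Sizeof(b)-unsafe.Sizeof(uintptr(0)))
}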
var oldHmapType *types.Type
// OldMapType returns a type interchangeable with runtime.hmap.
// Make sure this stays in sync with runtime/map.go.
func OldMapType() *types.Type {
if oldHmapType != nil {
return oldHmapType
}
// build a struct:
// type hmap struct {
// count int
// flags uint8
// B uint8
// noverflow uint16
// hash0 uint32
// buckets unsafe.Pointer
// oldbuckets unsafe.Pointer
// nevacuate uintptr
// clearSeq uint64
// extra unsafe.Pointer // *mapextra
// }
// must match runtime/map.go:hmap.
fields := []*types.Field{
makefield("count", types.Types[types.TINT]),
makefield("flags", types.Types[types.TUINT8]),
makefield("B", types.Types[types.TUINT8]),
makefield("noverflow", types.Types[types.TUINT16]),
makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
makefield("nevacuate", types.Types[types.TUINTPTR]),
makefield("clearSeq", types.Types[types.TUINT64]),
makefield("extra", types.Types[types.TUNSAFEPTR]),
}
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
hmap := types.NewNamed(n)
n.SetType(hmap)
n.SetTypecheck(1)
hmap.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hmap)
// The size of hmap should be 56 bytes on 64 bit
// and 36 bytes on 32 bit platforms.
if size := int64(2*8 + 5*types.PtrSize); hmap.Size() != size {
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
}
oldHmapType = hmap
return hmap
}
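
The 2*8 + 5*types.PtrSize expectation can be reproduced with a user-level mirror of the field list above; this is a sketch, not the runtime's actual declaration.

package main

import (
	"fmt"
	"unsafe"
)

// Mirror of the removed hmap field list. The "2*8" is the packed
// flags/B/noverflow/hash0 word plus clearSeq; the "5*PtrSize" is count
// plus the four pointer/uintptr fields.
type hmapMirror struct {
	count      int
	flags      uint8
	B          uint8
	noverflow  uint16
	hash0      uint32
	buckets    unsafe.Pointer
	oldbuckets unsafe.Pointer
	nevacuate  uintptr
	clearSeq   uint64
	extra      unsafe.Pointer
}

func main() {
	want := 2*8 + 5*unsafe.Sizeof(uintptr(0)) // 56 on 64-bit, 36 on 32-bit
	fmt.Println(unsafe.Sizeof(hmapMirror{}), want)
}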
var oldHiterType *types.Type
// OldMapIterType returns a type interchangeable with runtime.hiter.
// Make sure this stays in sync with runtime/map.go.
func OldMapIterType() *types.Type {
if oldHiterType != nil {
return oldHiterType
}
hmap := OldMapType()
// build a struct:
// type hiter struct {
// key unsafe.Pointer // *Key
// elem unsafe.Pointer // *Elem
// t unsafe.Pointer // *OldMapType
// h *hmap
// buckets unsafe.Pointer
// bptr unsafe.Pointer // *bmap
// overflow unsafe.Pointer // *[]*bmap
// oldoverflow unsafe.Pointer // *[]*bmap
// startBucket uintptr
// offset uint8
// wrapped bool
// B uint8
// i uint8
// bucket uintptr
// checkBucket uintptr
// clearSeq uint64
// }
// must match runtime/map.go:hiter.
fields := []*types.Field{
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("t", types.Types[types.TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.Types[types.TUNSAFEPTR]),
makefield("bptr", types.Types[types.TUNSAFEPTR]),
makefield("overflow", types.Types[types.TUNSAFEPTR]),
makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
makefield("startBucket", types.Types[types.TUINTPTR]),
makefield("offset", types.Types[types.TUINT8]),
makefield("wrapped", types.Types[types.TBOOL]),
makefield("B", types.Types[types.TUINT8]),
makefield("i", types.Types[types.TUINT8]),
makefield("bucket", types.Types[types.TUINTPTR]),
makefield("checkBucket", types.Types[types.TUINTPTR]),
makefield("clearSeq", types.Types[types.TUINT64]),
}
// build iterator struct holding the above fields
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
hiter := types.NewNamed(n)
n.SetType(hiter)
n.SetTypecheck(1)
hiter.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hiter)
if hiter.Size() != int64(8+12*types.PtrSize) {
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 8+12*types.PtrSize)
}
oldHiterType = hiter
return hiter
}
func writeOldMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
// internal/abi.OldMapType
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(OldMapBucketType(t))
hasher := genhash(t.Key())
c.Field("Key").WritePtr(s1)
c.Field("Elem").WritePtr(s2)
c.Field("Bucket").WritePtr(s3)
c.Field("Hasher").WritePtr(hasher)
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
if t.Key().Size() > abi.OldMapMaxKeyBytes {
c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
flags |= 1 // indirect key
} else {
c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
}
if t.Elem().Size() > abi.OldMapMaxElemBytes {
c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
flags |= 2 // indirect value
} else {
c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
}
c.Field("BucketSize").WriteUint16(uint16(OldMapBucketType(t).Size()))
if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
if needkeyupdate(t.Key()) {
flags |= 8 // need key update
}
if hashMightPanic(t.Key()) {
flags |= 16 // hash might panic
}
c.Field("Flags").WriteUint32(flags)
if u := t.Underlying(); u != t {
// If t is a named map type, also keep the underlying map
// type live in the binary. This is important to make sure that
// a named map and that same map cast to its underlying type via
// reflection, use the same hash function. See issue 37716.
lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)})
}
}

View file

@ -8,7 +8,6 @@ import (
"encoding/binary"
"fmt"
"internal/abi"
"internal/buildcfg"
"slices"
"sort"
"strings"
@ -773,11 +772,7 @@ func writeType(t *types.Type) *obj.LSym {
rt = rttype.InterfaceType
dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
case types.TMAP:
if buildcfg.Experiment.SwissMap {
rt = rttype.SwissMapType
} else {
rt = rttype.OldMapType
}
rt = rttype.MapType
case types.TPTR:
rt = rttype.PtrType
// TODO: use rttype.Type for Elem() is ANY?
@ -877,11 +872,7 @@ func writeType(t *types.Type) *obj.LSym {
}
case types.TMAP:
if buildcfg.Experiment.SwissMap {
writeSwissMapType(t, lsym, c)
} else {
writeOldMapType(t, lsym, c)
}
case types.TPTR:
// internal/abi.PtrType

View file

@ -27,8 +27,7 @@ var ArrayType *types.Type
var ChanType *types.Type
var FuncType *types.Type
var InterfaceType *types.Type
var OldMapType *types.Type
var SwissMapType *types.Type
var MapType *types.Type
var PtrType *types.Type
var SliceType *types.Type
var StructType *types.Type
@ -55,8 +54,7 @@ func Init() {
ChanType = FromReflect(reflect.TypeOf(abi.ChanType{}))
FuncType = FromReflect(reflect.TypeOf(abi.FuncType{}))
InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{}))
OldMapType = FromReflect(reflect.TypeOf(abi.OldMapType{}))
SwissMapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
MapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
PtrType = FromReflect(reflect.TypeOf(abi.PtrType{}))
SliceType = FromReflect(reflect.TypeOf(abi.SliceType{}))
StructType = FromReflect(reflect.TypeOf(abi.StructType{}))

View file

@ -94,11 +94,7 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
if buildcfg.Experiment.SwissMap {
_ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
} else {
_ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
}
_ = types.NewPtr(deferstruct()) // *runtime._defer
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
@ -3083,13 +3079,8 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
return v
}
// map <--> *hmap
var mt *types.Type
if buildcfg.Experiment.SwissMap {
mt = types.NewPtr(reflectdata.SwissMapType())
} else {
mt = types.NewPtr(reflectdata.OldMapType())
}
// map <--> *internal/runtime/maps.Map
mt := types.NewPtr(reflectdata.SwissMapType())
if to.Kind() == types.TMAP && from == mt {
return v
}
@ -5759,13 +5750,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() {
// length is stored in the first word.
if n.X.Type().IsMap() {
// length is stored in the first word, but needs conversion to int.
loadType := reflectdata.SwissMapType().Field(0).Type // uint64
load := s.load(loadType, x)
s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node
} else {
// length is stored in the first word for map/chan
// length is stored in the first word for chan, no conversion needed.
s.vars[n] = s.load(lenType, x)
}
case ir.OCAP:
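
The "length is stored in the first word" comment can be checked from ordinary (unsupported, layout-dependent) Go: a map value is a pointer to internal/runtime/maps.Map, whose first field is the uint64 element count.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	// Layout-dependent peek, for illustration only: the map header is a
	// pointer to internal/runtime/maps.Map, and the element count ("used")
	// is its first word, stored as uint64.
	h := *(*unsafe.Pointer)(unsafe.Pointer(&m))
	used := *(*uint64)(h)
	fmt.Println(len(m), used) // 3 3
}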

View file

@ -6,7 +6,6 @@ package test
import (
"bufio"
"internal/goexperiment"
"internal/testenv"
"io"
"math/bits"
@ -234,15 +233,6 @@ func TestIntendedInlining(t *testing.T) {
},
}
if !goexperiment.SwissMap {
// Maps
want["runtime"] = append(want["runtime"], "bucketMask")
want["runtime"] = append(want["runtime"], "bucketShift")
want["runtime"] = append(want["runtime"], "evacuated")
want["runtime"] = append(want["runtime"], "tophash")
want["runtime"] = append(want["runtime"], "(*bmap).keys")
want["runtime"] = append(want["runtime"], "(*bmap).overflow")
}
if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
// nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
// We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.

View file

@ -152,14 +152,12 @@ func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (v
func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) // old maps
func mapIterStart(mapType *byte, hmap map[any]any, hiter *any) // swiss maps
func mapIterStart(mapType *byte, hmap map[any]any, hiter *any)
func mapdelete(mapType *byte, hmap map[any]any, key *any)
func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32)
func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64)
func mapdelete_faststr(mapType *byte, hmap map[any]any, key string)
func mapiternext(hiter *any) // old maps
func mapIterNext(hiter *any) // swiss maps
func mapIterNext(hiter *any)
func mapclear(mapType *byte, hmap map[any]any)
// *byte is really *runtime.Type

View file

@ -130,13 +130,11 @@ var runtimeDecls = [...]struct {
{"mapassign_fast64", funcTag, 85},
{"mapassign_fast64ptr", funcTag, 93},
{"mapassign_faststr", funcTag, 86},
{"mapiterinit", funcTag, 94},
{"mapIterStart", funcTag, 94},
{"mapdelete", funcTag, 94},
{"mapdelete_fast32", funcTag, 95},
{"mapdelete_fast64", funcTag, 96},
{"mapdelete_faststr", funcTag, 97},
{"mapiternext", funcTag, 98},
{"mapIterNext", funcTag, 98},
{"mapclear", funcTag, 99},
{"makechan64", funcTag, 101},

View file

@ -471,11 +471,9 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
case TSTRUCT:
if m := t.StructType().Map; m != nil {
mt := m.MapType()
// Format the bucket struct for map[x]y as map.bucket[x]y.
// Format the bucket struct for map[x]y as map.group[x]y.
// This avoids a recursive print that generates very long names.
switch t {
case mt.OldBucket:
b.WriteString("map.bucket[")
case mt.SwissGroup:
b.WriteString("map.group[")
default:

View file

@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
}{
{Sym{}, 32, 64},
{Type{}, 60, 96},
{Map{}, 16, 32},
{Map{}, 12, 24},
{Forward{}, 20, 32},
{Func{}, 32, 56},
{Struct{}, 12, 24},

View file

@ -10,7 +10,6 @@ import (
"cmd/internal/src"
"fmt"
"go/constant"
"internal/buildcfg"
"internal/types/errors"
"sync"
)
@ -281,16 +280,6 @@ type Map struct {
Key *Type // Key type
Elem *Type // Val (elem) type
// Note: It would be cleaner to completely split Map into OldMap and
// SwissMap, but 99% of the types map code doesn't care about the
// implementation at all, so it is tons of churn to split the type.
// Only code that looks at the bucket field can care about the
// implementation.
// GOEXPERIMENT=noswissmap fields
OldBucket *Type // internal struct type representing a hash bucket
// GOEXPERIMENT=swissmap fields
SwissGroup *Type // internal struct type representing a slot group
}
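
With OldBucket gone, types.Map is down to three pointer-sized fields (Key, Elem, SwissGroup), which is what the updated Sizeof expectations above (12 and 24 bytes) reflect; a throwaway mirror:

package main

import (
	"fmt"
	"unsafe"
)

// Mirror of cmd/compile/internal/types.Map after this change: three
// pointer fields, so 12 bytes on 32-bit and 24 bytes on 64-bit.
type mapMirror struct {
	Key, Elem, SwissGroup unsafe.Pointer
}

func main() {
	fmt.Println(unsafe.Sizeof(mapMirror{}))
}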
@ -1189,13 +1178,12 @@ func (t *Type) cmp(x *Type) Cmp {
// by the general code after the switch.
case TSTRUCT:
if buildcfg.Experiment.SwissMap {
// Is this a map group type?
if t.StructType().Map == nil {
if x.StructType().Map != nil {
return CMPlt // nil < non-nil
}
// to the fallthrough
// to the general case
} else if x.StructType().Map == nil {
return CMPgt // nil > non-nil
}
@ -1204,22 +1192,6 @@ func (t *Type) cmp(x *Type) Cmp {
// to the group type (it uses unsafe.Pointer). If it
// did, this would need special handling to avoid
// infinite recursion.
} else {
// Is this a map bucket type?
if t.StructType().Map == nil {
if x.StructType().Map != nil {
return CMPlt // nil < non-nil
}
// to the fallthrough
} else if x.StructType().Map == nil {
return CMPgt // nil > non-nil
}
// Both have non-nil Map, fallthrough to the general
// case. Note that the map type does not directly refer
// to the bucket type (it uses unsafe.Pointer). If it
// did, this would need special handling to avoid
// infinite recursion.
}
tfs := t.Fields()
xfs := x.Fields()

View file

@ -9,7 +9,6 @@ import (
"go/constant"
"go/token"
"internal/abi"
"internal/buildcfg"
"strings"
"cmd/compile/internal/base"
@ -313,13 +312,6 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
if buildcfg.Experiment.SwissMap {
return walkMakeSwissMap(n, init)
}
return walkMakeOldMap(n, init)
}
func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
mapType := reflectdata.SwissMapType()
hint := n.Len
@ -366,12 +358,12 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))
// g.ctrl = abi.SwissMapCtrlEmpty
csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map_swiss.go
csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go
ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
nif.Body.Append(ca)
// m.dirPtr = g
dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map_swiss.go
dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map.go
na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, dsym), typecheck.ConvNop(g, types.Types[types.TUNSAFEPTR]))
nif.Body.Append(na)
appendWalkStmt(init, nif)
@ -391,7 +383,7 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// m map has been allocated on the stack already.
// m.seed = uintptr(rand())
rand := mkcall("rand", types.Types[types.TUINT64], init)
seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map_swiss.go
seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map.go
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, seedSym), typecheck.Conv(rand, types.Types[types.TUINTPTR])))
return typecheck.ConvNop(m, t)
}
@ -428,101 +420,6 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), m)
}
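
The non-escaping fast path above (a stack-allocated Map plus one stack-allocated group when the hint fits in a single group) can be loosely observed from user code. Whether it kicks in depends on escape analysis and the build, so treat this as a sketch rather than a guarantee.

package main

import (
	"fmt"
	"testing"
)

func main() {
	// With a small constant hint and no escape, the map header and its one
	// group are expected to live on the stack, so this should report ~0
	// allocations per run on current builds.
	allocs := testing.AllocsPerRun(100, func() {
		m := make(map[int]int, 8)
		for i := 0; i < 8; i++ {
			m[i] = i
		}
		_ = len(m)
	})
	fmt.Println("allocs per run:", allocs)
}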
func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
hmapType := reflectdata.OldMapType()
hint := n.Len
// var h *hmap
var h ir.Node
if n.Esc() == ir.EscNone {
// Allocate hmap on stack.
// var hv hmap
// h = &hv
h = stackTempAddr(init, hmapType)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !ir.IsConst(hint, constant.Int) ||
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
// In case hint is larger than BUCKETSIZE runtime.makemap
// will allocate the buckets on the heap, see #20184
//
// if hint <= BUCKETSIZE {
// var bv bmap
// b = &bv
// h.buckets = b
// }
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil)
nif.Likely = true
// var bv bmap
// b = &bv
b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t))
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
nif.Body.Append(na)
appendWalkStmt(init, nif)
}
}
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
// improves binary size by using calls with fewer arguments.
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
if n.Esc() == ir.EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = rand32()
rand := mkcall("rand32", types.Types[types.TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
return typecheck.ConvNop(h, t)
}
// Call runtime.makemap_small to allocate an
// hmap on the heap and initialize hmap's hash0 field.
fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
return mkcall1(fn, n.Type(), init)
}
if n.Esc() != ir.EscNone {
h = typecheck.NodNil()
}
// Map initialization with a variable or large hint is
// more complicated. We therefore generate a call to
// runtime.makemap to initialize hmap and allocate the
// map buckets.
// When hint fits into int, use makemap instead of
// makemap64, which is faster and shorter on 32 bit platforms.
fnname := "makemap64"
argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makemap during runtime.
if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makemap"
argtype = types.Types[types.TINT]
}
fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
}
// walkMakeSlice walks an OMAKESLICE node.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
len := n.Len

View file

@ -8,7 +8,6 @@ import (
"fmt"
"go/constant"
"internal/abi"
"internal/buildcfg"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@ -967,12 +966,8 @@ func (o *orderState) stmt(n ir.Node) {
n.X = o.copyExpr(r)
// n.Prealloc is the temp for the iterator.
// MapIterType contains pointers and needs to be zeroed.
if buildcfg.Experiment.SwissMap {
// SwissMapIterType contains pointers and needs to be zeroed.
n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
} else {
n.Prealloc = o.newTemp(reflectdata.OldMapIterType(), true)
}
}
n.Key = o.exprInPlace(n.Key)
n.Value = o.exprInPlace(n.Value)

View file

@ -6,7 +6,6 @@ package walk
import (
"go/constant"
"internal/buildcfg"
"unicode/utf8"
"cmd/compile/internal/base"
@ -247,20 +246,11 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
hit := nrange.Prealloc
th := hit.Type()
// depends on layout of iterator struct.
// See cmd/compile/internal/reflectdata/reflect.go:MapIterType
var keysym, elemsym *types.Sym
var iterInit, iterNext string
if buildcfg.Experiment.SwissMap {
keysym = th.Field(0).Sym
elemsym = th.Field(1).Sym // ditto
iterInit = "mapIterStart"
iterNext = "mapIterNext"
} else {
keysym = th.Field(0).Sym
elemsym = th.Field(1).Sym // ditto
iterInit = "mapiterinit"
iterNext = "mapiternext"
}
// See cmd/compile/internal/reflectdata/map.go:SwissMapIterType
keysym := th.Field(0).Sym
elemsym := th.Field(1).Sym // ditto
iterInit := "mapIterStart"
iterNext := "mapIterNext"
fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)
init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
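
For reference, the exported analogue of this start/next iterator protocol is reflect's MapIter; the sketch below only illustrates the shape of the protocol, not the compiler's lowering of range statements.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	it := reflect.ValueOf(m).MapRange()
	for it.Next() { // roughly: iterator init on the first Next, advance afterwards
		fmt.Println(it.Key(), it.Value())
	}
}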

View file

@ -7,7 +7,6 @@ package walk
import (
"fmt"
"internal/abi"
"internal/buildcfg"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@ -192,42 +191,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
if buildcfg.Experiment.SwissMap {
return mapfastSwiss(t)
}
return mapfastOld(t)
}
func mapfastSwiss(t *types.Type) int {
if t.Elem().Size() > abi.OldMapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
case types.AMEM32:
if !t.Key().HasPointers() {
return mapfast32
}
if types.PtrSize == 4 {
return mapfast32ptr
}
base.Fatalf("small pointer %v", t.Key())
case types.AMEM64:
if !t.Key().HasPointers() {
return mapfast64
}
if types.PtrSize == 8 {
return mapfast64ptr
}
// Two-word object, at least one of which is a pointer.
// Use the slow path.
case types.ASTRING:
return mapfaststr
}
return mapslow
}
func mapfastOld(t *types.Type) int {
if t.Elem().Size() > abi.OldMapMaxElemBytes {
if t.Elem().Size() > abi.SwissMapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {

View file

@ -560,14 +560,10 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
case abi.Chan: // reflect.chanType
off += 2 * arch.PtrSize
case abi.Map:
if buildcfg.Experiment.SwissMap {
off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
if arch.PtrSize == 8 {
off += 4 // padding for final uint32 field (Flags).
}
} else {
off += 4*arch.PtrSize + 8 // internal/abi.OldMapType
}
case abi.Interface: // reflect.interfaceType
off += 3 * arch.PtrSize
default:
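
The 7*arch.PtrSize + 4 accounting corresponds to the internal/abi.SwissMapType fields that follow the embedded Type (three *Type pointers, the Hasher func, three uintptrs, and a uint32 Flags). A mirror of just that tail shows where the extra 4 bytes of padding on 64-bit come from; the field names follow the struct literal in the test helper further down.

package main

import (
	"fmt"
	"unsafe"
)

// Illustrative mirror of the SwissMapType fields after the embedded Type:
// 7 pointer-sized fields plus a trailing uint32, which pads out to a full
// word on 64-bit targets.
type swissMapTypeTail struct {
	Key, Elem, Group unsafe.Pointer
	Hasher           func(unsafe.Pointer, uintptr) uintptr
	GroupSize        uintptr
	SlotSize         uintptr
	ElemOff          uintptr
	Flags            uint32
}

func main() {
	ptr := unsafe.Sizeof(uintptr(0))
	fmt.Println(unsafe.Sizeof(swissMapTypeTail{}), 7*ptr+4) // 64 vs 60 on 64-bit
}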

View file

@ -872,14 +872,6 @@ func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valna
}
func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
if buildcfg.Experiment.SwissMap {
d.synthesizemaptypesSwiss(ctxt, die)
} else {
d.synthesizemaptypesOld(ctxt, die)
}
}
func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
mapType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.Map"))
tableType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.table"))
groupsReferenceType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.groupsReference"))
@ -941,102 +933,6 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
}
}
func (d *dwctxt) synthesizemaptypesOld(ctxt *Link, die *dwarf.DWDie) {
hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
if hash == nil {
return
}
for ; die != nil; die = die.Link {
if die.Abbrev != dwarf.DW_ABRV_MAPTYPE {
continue
}
gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym))
keytype := decodetypeMapKey(d.ldr, d.arch, gotype)
valtype := decodetypeMapValue(d.ldr, d.arch, gotype)
keydata := d.ldr.Data(keytype)
valdata := d.ldr.Data(valtype)
keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype))
// compute size info like hashmap.c does.
indirectKey, indirectVal := false, false
if keysize > abi.OldMapMaxKeyBytes {
keysize = int64(d.arch.PtrSize)
indirectKey = true
}
if valsize > abi.OldMapMaxElemBytes {
valsize = int64(d.arch.PtrSize)
indirectVal = true
}
// Construct type to represent an array of BucketSize keys
keyname := d.nameFromDIESym(keytype)
dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*keysize, 0)
t := keytype
if indirectKey {
t = d.defptrto(keytype)
}
d.newrefattr(dwhk, dwarf.DW_AT_type, t)
fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
})
// Construct type to represent an array of BucketSize values
valname := d.nameFromDIESym(valtype)
dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*valsize, 0)
t := valtype
if indirectVal {
t = d.defptrto(valtype)
}
d.newrefattr(dwhv, dwarf.DW_AT_type, t)
fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
})
// Construct bucket<K,V>
dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
// Copy over all fields except the field "data" from the generic
// bucket. "data" will be replaced with keys/values below.
d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
newmemberoffsetattr(fld, abi.OldMapBucketCount)
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*int32(keysize))
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize)))
if d.arch.RegSize > d.arch.PtrSize {
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
}
newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount+abi.OldMapBucketCount*keysize+abi.OldMapBucketCount*valsize+int64(d.arch.RegSize), 0)
})
// Construct hash<K,V>
dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
d.copychildren(ctxt, dwh, hash)
d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
})
// make map type a pointer to hash<K,V>
d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs))
}
}
func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) {
sudog := walktypedef(d.findprotodie(ctxt, "type:runtime.sudog"))
waitq := walktypedef(d.findprotodie(ctxt, "type:runtime.waitq"))
@ -2015,14 +1911,9 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:runtime.sudog": nil,
"type:runtime.waitq": nil,
"type:runtime.hchan": nil,
}
if buildcfg.Experiment.SwissMap {
prototypedies["type:internal/runtime/maps.Map"] = nil
prototypedies["type:internal/runtime/maps.table"] = nil
prototypedies["type:internal/runtime/maps.groupsReference"] = nil
} else {
prototypedies["type:runtime.hmap"] = nil
prototypedies["type:runtime.bmap"] = nil
"type:internal/runtime/maps.Map": nil,
"type:internal/runtime/maps.table": nil,
"type:internal/runtime/maps.groupsReference": nil,
}
// Needed by the prettyprinter code for interface inspection.
@ -2034,16 +1925,12 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.PtrType",
"type:internal/abi.SliceType",
"type:internal/abi.StructType",
"type:internal/abi.SwissMapType",
"type:internal/abi.InterfaceType",
"type:internal/abi.ITab",
"type:internal/abi.Imethod"} {
d.defgotype(d.lookupOrDiag(typ))
}
if buildcfg.Experiment.SwissMap {
d.defgotype(d.lookupOrDiag("type:internal/abi.SwissMapType"))
} else {
d.defgotype(d.lookupOrDiag("type:internal/abi.OldMapType"))
}
// fake root DIE for compile unit DIEs
var dwroot dwarf.DWDie

View file

@ -63,6 +63,7 @@ func TestRuntimeTypesPresent(t *testing.T) {
"internal/abi.PtrType": true,
"internal/abi.SliceType": true,
"internal/abi.StructType": true,
"internal/abi.SwissMapType": true,
"internal/abi.InterfaceType": true,
"internal/abi.ITab": true,
}
@ -71,16 +72,6 @@ func TestRuntimeTypesPresent(t *testing.T) {
if len(found) != len(want) {
t.Errorf("found %v, want %v", found, want)
}
// Must have one of OldMapType or SwissMapType.
want = map[string]bool{
"internal/abi.OldMapType": true,
"internal/abi.SwissMapType": true,
}
found = findTypes(t, dwarf, want)
if len(found) != 1 {
t.Errorf("map type want one of %v found %v", want, found)
}
}
func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) {

View file

@ -9,7 +9,6 @@ package maphash
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"unsafe"
)
@ -51,12 +50,7 @@ func comparableHash[T comparable](v T, seed Seed) uint64 {
s := seed.s
var m map[T]struct{}
mTyp := abi.TypeOf(m)
var hasher func(unsafe.Pointer, uintptr) uintptr
if goexperiment.SwissMap {
hasher = (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
} else {
hasher = (*abi.OldMapType)(unsafe.Pointer(mTyp)).Hasher
}
hasher := (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
if goarch.PtrSize == 8 {
return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)))
}
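
For user code that wants the same capability as this internal hasher lookup, the exported entry point is hash/maphash.Comparable (Go 1.24+):

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed()
	// Hash any comparable value using the runtime's hash functions.
	fmt.Println(maphash.Comparable(seed, "hello"))
	fmt.Println(maphash.Comparable(seed, 42))
}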

View file

@ -1,54 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
import (
"unsafe"
)
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Maximum number of key/elem pairs a bucket can hold.
OldMapBucketCountBits = 3 // log2 of number of elements in a bucket.
OldMapBucketCount = 1 << OldMapBucketCountBits
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
OldMapMaxKeyBytes = 128
OldMapMaxElemBytes = 128 // Must fit in a uint8.
)
type OldMapType struct {
Type
Key *Type
Elem *Type
Bucket *Type // internal type representing a hash bucket
// function for hashing keys (ptr to key, seed) -> hash
Hasher func(unsafe.Pointer, uintptr) uintptr
KeySize uint8 // size of key slot
ValueSize uint8 // size of elem slot
BucketSize uint16 // size of bucket
Flags uint32
}
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&1 != 0
}
func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&2 != 0
}
func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys
return mt.Flags&4 != 0
}
func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&8 != 0
}
func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&16 != 0
}

View file

@ -1,10 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package abi
// See comment in map_select_swiss.go.
type mapType = OldMapType

View file

@ -1,22 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package abi
// Select the map type that this binary is built using. This is for common
// lookup methods like Type.Key to know which type to use.
//
// Note that mapType *must not be used by any functions called in the
// compiler to build a target program* because the compiler must use the map
// type determined by run-time GOEXPERIMENT, not the build tags used to build
// the compiler.
//
// TODO(prattmic): This package is rather confusing because it has many
// functions that can't be used by the compiler (e.g., Type.Uncommon depends on
// the layout of type + uncommon objects in the binary. It would be incorrect
// for an ad-hoc local Type object). It may be best to move code that isn't
// usable by the compiler out of the package.
type mapType = SwissMapType

View file

@ -355,7 +355,7 @@ func (t *Type) Uncommon() *UncommonType {
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
mapType
SwissMapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u
@ -384,7 +384,7 @@ func (t *Type) Elem() *Type {
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
tt := (*mapType)(unsafe.Pointer(t))
tt := (*SwissMapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))
@ -404,12 +404,12 @@ func (t *Type) StructType() *StructType {
return (*StructType)(unsafe.Pointer(t))
}
// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match.
func (t *Type) MapType() *mapType {
// MapType returns t cast to a *SwissMapType, or nil if its tag does not match.
func (t *Type) MapType() *SwissMapType {
if t.Kind() != Map {
return nil
}
return (*mapType)(unsafe.Pointer(t))
return (*SwissMapType)(unsafe.Pointer(t))
}
// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@ -471,7 +471,7 @@ func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
func (t *Type) Key() *Type {
if t.Kind() == Map {
return (*mapType)(unsafe.Pointer(t)).Key
return (*SwissMapType)(unsafe.Pointer(t)).Key
}
return nil
}

View file

@ -82,7 +82,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
RegabiWrappers: regabiSupported,
RegabiArgs: regabiSupported,
AliasTypeParams: true,
SwissMap: true,
Dwarf5: dwarf5Supported,
}

View file

@ -1,8 +0,0 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.swissmap
package goexperiment
const SwissMap = false
const SwissMapInt = 0

View file

@ -1,8 +0,0 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.swissmap
package goexperiment
const SwissMap = true
const SwissMapInt = 1

View file

@ -105,9 +105,6 @@ type Flags struct {
// This flag will be removed with Go 1.25.
AliasTypeParams bool
// SwissMap enables the SwissTable-based map implementation.
SwissMap bool
// Synctest enables the testing/synctest package.
Synctest bool

View file

@ -1,51 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to
// construct a swissmap table for running the tests in this package.
package maps
import (
"internal/abi"
"unsafe"
)
type instantiatedGroup[K comparable, V any] struct {
ctrls ctrlGroup
slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V]
}
type instantiatedSlot[K comparable, V any] struct {
key K
elem V
}
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
var m map[K]V
mTyp := abi.TypeOf(m)
omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))
var grp instantiatedGroup[K, V]
var slot instantiatedSlot[K, V]
mt := &abi.SwissMapType{
Key: omt.Key,
Elem: omt.Elem,
Group: abi.TypeOf(grp),
Hasher: omt.Hasher,
SlotSize: unsafe.Sizeof(slot),
GroupSize: unsafe.Sizeof(grp),
ElemOff: unsafe.Offsetof(slot.elem),
}
if omt.NeedKeyUpdate() {
mt.Flags |= abi.SwissMapNeedKeyUpdate
}
if omt.HashMightPanic() {
mt.Flags |= abi.SwissMapHashMightPanic
}
return mt
}

View file

@ -1,19 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (
"internal/abi"
"unsafe"
)
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
var m map[K]V
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
return mt
}

View file

@ -22,6 +22,13 @@ const MaxAvgGroupLoad = maxAvgGroupLoad
// we can't properly test hint alloc overflows with this.
const maxAllocTest = 1 << 30
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
var m map[K]V
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
return mt
}
func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
mt := newTestMapType[K, V]()
return NewMap(mt, hint, nil, maxAllocTest), mt

View file

@ -191,7 +191,7 @@ func h2(h uintptr) uintptr {
return h & 0x7f
}
// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType.
// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:SwissMapType.
type Map struct {
// The number of filled slots (i.e. the number of elements in all
// tables). Excludes deleted slots.
@ -814,13 +814,6 @@ func (m *Map) Clone(typ *abi.SwissMapType) *Map {
return m
}
func OldMapKeyError(t *abi.OldMapType, p unsafe.Pointer) error {
if !t.HashMightPanic() {
return nil
}
return mapKeyError2(t.Key, p)
}
func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error {
if !t.HashMightPanic() {
return nil

View file

@ -1,267 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests of map internals that need to use the builtin map type, and thus must
// be built with GOEXPERIMENT=swissmap.
//go:build goexperiment.swissmap
package maps_test
import (
"fmt"
"internal/abi"
"internal/runtime/maps"
"testing"
"unsafe"
)
var alwaysFalse bool
var escapeSink any
func escape[T any](x T) T {
if alwaysFalse {
escapeSink = x
}
return x
}
const (
belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
)
func TestTableGroupCount(t *testing.T) {
// Test that maps of different sizes have the right number of
// tables/groups.
type mapCount struct {
tables int
groups uint64
}
type mapCase struct {
initialLit mapCount
initialHint mapCount
after mapCount
}
var testCases = []struct {
n int // n is the number of map elements
escape mapCase // expected values for escaping map
}{
{
n: -(1 << 30),
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: -1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: 0,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots + 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: belowMax, // 1.5 group max = 2 groups @ 75%
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: atMax, // 2 groups at max
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 4},
after: mapCount{1, 4},
},
},
{
n: 2 * belowMax, // 3 * group max = 4 groups @75%
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 4},
after: mapCount{1, 4},
},
},
{
n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 8},
after: mapCount{1, 8},
},
},
}
testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
mm := *(**maps.Map)(unsafe.Pointer(&m))
gotTab := mm.TableCount()
if gotTab != initial.tables {
t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
}
gotGroup := mm.GroupCount()
if gotGroup != initial.groups {
t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
}
for i := 0; i < n; i++ {
m[i] = i
}
gotTab = mm.TableCount()
if gotTab != after.tables {
t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
}
gotGroup = mm.GroupCount()
if gotGroup != after.groups {
t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
}
}
t.Run("mapliteral", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(map[int]int{})
testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
})
})
}
})
t.Run("nohint", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int))
testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
})
})
}
})
t.Run("makemap", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int, tc.n))
testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
})
})
}
})
t.Run("makemap64", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int, int64(tc.n)))
testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
})
})
}
})
}
func TestTombstoneGrow(t *testing.T) {
tableSizes := []int{16, 32, 64, 128, 256}
for _, tableSize := range tableSizes {
for _, load := range []string{"low", "mid", "high"} {
capacity := tableSize * 7 / 8
var initialElems int
switch load {
case "low":
initialElems = capacity / 8
case "mid":
initialElems = capacity / 2
case "high":
initialElems = capacity
}
t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
allocs := testing.AllocsPerRun(1, func() {
// Fill the map with elements.
m := make(map[int]int, capacity)
for i := range initialElems {
m[i] = i
}
// This is the heart of our test.
// Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
// while keeping the map at a ~constant number of elements (+/-1).
nextKey := initialElems
for range 100000 {
for k := range m {
delete(m, k)
break
}
m[nextKey] = nextKey
nextKey++
if len(m) != initialElems {
t.Fatal("len(m) should remain constant")
}
}
})
// The make has 4 allocs (map, directory, table, groups).
// Each growth has 2 allocs (table, groups).
// We allow two growths if we start full, 1 otherwise.
// Fail (somewhat arbitrarily) if there are more than that.
allowed := float64(4 + 1*2)
if initialElems == capacity {
allowed += 2
}
if allocs > allowed {
t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
}
})
}
}
}

View file

@ -699,3 +699,252 @@ func TestMapDeleteClear(t *testing.T) {
t.Errorf("Delete(%d) failed to clear element. got %d want 0", key, gotElem)
}
}
var alwaysFalse bool
var escapeSink any
func escape[T any](x T) T {
if alwaysFalse {
escapeSink = x
}
return x
}
const (
belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
)
func TestTableGroupCount(t *testing.T) {
// Test that maps of different sizes have the right number of
// tables/groups.
type mapCount struct {
tables int
groups uint64
}
type mapCase struct {
initialLit mapCount
initialHint mapCount
after mapCount
}
var testCases = []struct {
n int // n is the number of map elements
escape mapCase // expected values for escaping map
}{
{
n: -(1 << 30),
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: -1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: 0,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 0},
},
},
{
n: 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots + 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: belowMax, // 1.5 group max = 2 groups @ 75%
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: atMax, // 2 groups at max
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
},
{
n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 4},
after: mapCount{1, 4},
},
},
{
n: 2 * belowMax, // 3 * group max = 4 groups @75%
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 4},
after: mapCount{1, 4},
},
},
{
n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 8},
after: mapCount{1, 8},
},
},
}
testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
mm := *(**maps.Map)(unsafe.Pointer(&m))
gotTab := mm.TableCount()
if gotTab != initial.tables {
t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
}
gotGroup := mm.GroupCount()
if gotGroup != initial.groups {
t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
}
for i := 0; i < n; i++ {
m[i] = i
}
gotTab = mm.TableCount()
if gotTab != after.tables {
t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
}
gotGroup = mm.GroupCount()
if gotGroup != after.groups {
t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
}
}
t.Run("mapliteral", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(map[int]int{})
testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
})
})
}
})
t.Run("nohint", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int))
testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
})
})
}
})
t.Run("makemap", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int, tc.n))
testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
})
})
}
})
t.Run("makemap64", func(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
t.Run("escape", func(t *testing.T) {
m := escape(make(map[int]int, int64(tc.n)))
testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
})
})
}
})
}
func TestTombstoneGrow(t *testing.T) {
tableSizes := []int{16, 32, 64, 128, 256}
for _, tableSize := range tableSizes {
for _, load := range []string{"low", "mid", "high"} {
capacity := tableSize * 7 / 8
var initialElems int
switch load {
case "low":
initialElems = capacity / 8
case "mid":
initialElems = capacity / 2
case "high":
initialElems = capacity
}
t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
allocs := testing.AllocsPerRun(1, func() {
// Fill the map with elements.
m := make(map[int]int, capacity)
for i := range initialElems {
m[i] = i
}
// This is the heart of our test.
// Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
// while keeping the map at a ~constant number of elements (+/-1).
nextKey := initialElems
for range 100000 {
for k := range m {
delete(m, k)
break
}
m[nextKey] = nextKey
nextKey++
if len(m) != initialElems {
t.Fatal("len(m) should remain constant")
}
}
})
// The make has 4 allocs (map, directory, table, groups).
// Each growth has 2 allocs (table, groups).
// We allow two growths if we start full, 1 otherwise.
// Fail (somewhat arbitrarily) if there are more than that.
allowed := float64(4 + 1*2)
if initialElems == capacity {
allowed += 2
}
if allocs > allowed {
t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
}
})
}
}
}

View file

@ -6,6 +6,10 @@ package maps
import (
"internal/abi"
"internal/asan"
"internal/msan"
"internal/race"
"internal/runtime/sys"
"unsafe"
)
@ -28,3 +32,337 @@ func newarray(typ *abi.Type, n int) unsafe.Pointer
//go:linkname newobject
func newobject(typ *abi.Type) unsafe.Pointer
// Pushed from runtime in order to use runtime.plainError
//
//go:linkname errNilAssign
var errNilAssign error
// Pull from runtime. It is important that this is the exact same copy as the
// runtime because runtime.mapaccess1_fat compares the returned pointer with
// &runtime.zeroVal[0].
// TODO: move zeroVal to internal/abi?
//
//go:linkname zeroVal runtime.zeroVal
var zeroVal [abi.ZeroValSize]byte
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
race.ReadPC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled && m != nil {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled && m != nil {
asan.Read(key, typ.Key.Size_)
}
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if m.writing != 0 {
fatal("concurrent map read and map write")
}
hash := typ.Hasher(key, m.seed)
if m.dirLen <= 0 {
_, elem, ok := m.getWithKeySmall(typ, hash, key)
if !ok {
return unsafe.Pointer(&zeroVal[0])
}
return elem
}
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
return slotElem
}
match = match.removeFirst()
}
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
return unsafe.Pointer(&zeroVal[0])
}
}
}
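
To make the probe loop above easier to follow, here is a toy, dependency-free sketch of the same idea: 8 control bytes per group holding h2 (the low 7 bits of the hash), 0x80 marking an empty slot, and lookup stopping at the first group that contains an empty slot. The real code uses word-at-a-time matchH2/matchEmpty bit tricks and a non-linear probe sequence; this version just loops.

package main

import "fmt"

// Toy sketch of group-based probing. Real groups are matched with
// word-at-a-time bit operations (matchH2 / matchEmpty); this loops.
const (
	groupSlots = 8    // abi.SwissMapGroupSlots
	ctrlEmpty  = 0x80 // control byte for an empty slot
)

type group struct {
	ctrls [groupSlots]uint8
	keys  [groupSlots]uint64
	elems [groupSlots]string
}

func h1(h uint64) uint64 { return h >> 7 }
func h2(h uint64) uint8  { return uint8(h & 0x7f) }

func lookup(groups []group, hash, key uint64) (string, bool) {
	mask := uint64(len(groups) - 1) // len(groups) is a power of two
	for g := h1(hash) & mask; ; g = (g + 1) & mask { // simplified linear probe
		grp := &groups[g]
		// First check slots whose control byte matches h2 ...
		for i := 0; i < groupSlots; i++ {
			if grp.ctrls[i] == h2(hash) && grp.keys[i] == key {
				return grp.elems[i], true
			}
		}
		// ... then, if the group has any empty slot, the key cannot be
		// further along the probe sequence.
		for i := 0; i < groupSlots; i++ {
			if grp.ctrls[i] == ctrlEmpty {
				return "", false
			}
		}
	}
}

func main() {
	gs := make([]group, 2)
	for i := range gs {
		for j := range gs[i].ctrls {
			gs[i].ctrls[j] = ctrlEmpty
		}
	}
	// Hand-place key 42 where probing will look for it.
	hash := uint64(42) * 0x9e3779b97f4a7c15
	g := h1(hash) & 1
	gs[g].ctrls[0] = h2(hash)
	gs[g].keys[0] = 42
	gs[g].elems[0] = "answer"
	fmt.Println(lookup(gs, hash, 42)) // answer true
}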
//go:linkname runtime_mapaccess2 runtime.mapaccess2
func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
race.ReadPC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled && m != nil {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled && m != nil {
asan.Read(key, typ.Key.Size_)
}
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if m.writing != 0 {
fatal("concurrent map read and map write")
}
hash := typ.Hasher(key, m.seed)
if m.dirLen == 0 {
_, elem, ok := m.getWithKeySmall(typ, hash, key)
if !ok {
return unsafe.Pointer(&zeroVal[0]), false
}
return elem, true
}
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
return slotElem, true
}
match = match.removeFirst()
}
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapassign)
race.WritePC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled {
asan.Read(key, typ.Key.Size_)
}
if m.writing != 0 {
fatal("concurrent map writes")
}
hash := typ.Hasher(key, m.seed)
// Set writing after calling Hasher, since Hasher may panic, in which
// case we have not actually done a write.
m.writing ^= 1 // toggle, see comment on writing
if m.dirPtr == nil {
m.growToSmall(typ)
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
fatal("concurrent map writes")
}
m.writing ^= 1
return elem
}
// Can't fit another entry, grow to full size map.
m.growToTable(typ)
}
var slotElem unsafe.Pointer
outer:
for {
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
// As we look for a match, keep track of the first deleted slot
// we find, which we'll use to insert the new entry if
// necessary.
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
// Look for an existing slot containing this key.
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
if typ.NeedKeyUpdate() {
typedmemmove(typ.Key, slotKey, key)
}
slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
t.checkInvariants(typ, m)
break outer
}
match = match.removeFirst()
}
// No existing slot for this key in this group. Is this the end
// of the probe sequence?
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
var i uintptr
// If we found a deleted slot along the way, we
// can replace it without consuming growthLeft.
if firstDeletedGroup.data != nil {
g = firstDeletedGroup
i = firstDeletedSlot
t.growthLeft++ // will be decremented below to become a no-op.
} else {
// Otherwise, use the empty slot.
i = match.first()
}
// If there is room left to grow, just insert the new entry.
if t.growthLeft > 0 {
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
kmem := newobject(typ.Key)
*(*unsafe.Pointer)(slotKey) = kmem
slotKey = kmem
}
typedmemmove(typ.Key, slotKey, key)
slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
emem := newobject(typ.Elem)
*(*unsafe.Pointer)(slotElem) = emem
slotElem = emem
}
g.ctrls().set(i, ctrl(h2(hash)))
t.growthLeft--
t.used++
m.used++
t.checkInvariants(typ, m)
break outer
}
t.rehash(typ, m)
continue outer
}
// No empty slots in this group. Check for a deleted
// slot, which we'll use if we don't find a match later
// in the probe sequence.
//
// We only need to remember a single deleted slot.
if firstDeletedGroup.data == nil {
// Since we already checked for empty slots
// above, matches here must be deleted slots.
match = g.ctrls().matchEmptyOrDeleted()
if match != 0 {
firstDeletedGroup = g
firstDeletedSlot = match.first()
}
}
}
}
if m.writing == 0 {
fatal("concurrent map writes")
}
m.writing ^= 1
return slotElem
}
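For orientation, the lookup and assign paths above split the hash into h1, which drives the probe sequence over groups, and h2, a 7-bit tag matched against each group's control bytes. A standalone sketch of that split, using assumed constants (8 slots per group, a power-of-two group count) rather than the runtime's real types:

package main

import "fmt"

const groupSlots = 8 // assumed value of abi.SwissMapGroupSlots

// h1 selects the starting group; h2 is the per-slot control tag.
func h1(hash uint64) uint64 { return hash >> 7 }
func h2(hash uint64) uint8  { return uint8(hash & 0x7f) }

func main() {
	const numGroups = 16 // assumed power of two, so masking works
	hash := uint64(0x9e3779b97f4a7c15)
	group := h1(hash) & (numGroups - 1)
	fmt.Printf("probe starts at group %d; tag %#02x is matched against %d control bytes per group\n",
		group, h2(hash), groupSlots)
}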

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (

@@ -1,352 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (
"internal/abi"
"internal/asan"
"internal/msan"
"internal/race"
"internal/runtime/sys"
"unsafe"
)
// Functions below pushed from runtime.
// Pushed from runtime in order to use runtime.plainError
//
//go:linkname errNilAssign
var errNilAssign error
// Pull from runtime. It is important that this is the exact same copy as the
// runtime because runtime.mapaccess1_fat compares the returned pointer with
// &runtime.zeroVal[0].
// TODO: move zeroVal to internal/abi?
//
//go:linkname zeroVal runtime.zeroVal
var zeroVal [abi.ZeroValSize]byte
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
race.ReadPC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled && m != nil {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled && m != nil {
asan.Read(key, typ.Key.Size_)
}
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if m.writing != 0 {
fatal("concurrent map read and map write")
}
hash := typ.Hasher(key, m.seed)
if m.dirLen <= 0 {
_, elem, ok := m.getWithKeySmall(typ, hash, key)
if !ok {
return unsafe.Pointer(&zeroVal[0])
}
return elem
}
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
return slotElem
}
match = match.removeFirst()
}
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
return unsafe.Pointer(&zeroVal[0])
}
}
}
//go:linkname runtime_mapaccess2 runtime.mapaccess2
func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
race.ReadPC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled && m != nil {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled && m != nil {
asan.Read(key, typ.Key.Size_)
}
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if m.writing != 0 {
fatal("concurrent map read and map write")
}
hash := typ.Hasher(key, m.seed)
if m.dirLen == 0 {
_, elem, ok := m.getWithKeySmall(typ, hash, key)
if !ok {
return unsafe.Pointer(&zeroVal[0]), false
}
return elem, true
}
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
return slotElem, true
}
match = match.removeFirst()
}
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapassign)
race.WritePC(unsafe.Pointer(m), callerpc, pc)
race.ReadObjectPC(typ.Key, key, callerpc, pc)
}
if msan.Enabled {
msan.Read(key, typ.Key.Size_)
}
if asan.Enabled {
asan.Read(key, typ.Key.Size_)
}
if m.writing != 0 {
fatal("concurrent map writes")
}
hash := typ.Hasher(key, m.seed)
// Set writing after calling Hasher, since Hasher may panic, in which
// case we have not actually done a write.
m.writing ^= 1 // toggle, see comment on writing
if m.dirPtr == nil {
m.growToSmall(typ)
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
fatal("concurrent map writes")
}
m.writing ^= 1
return elem
}
// Can't fit another entry, grow to full size map.
m.growToTable(typ)
}
var slotElem unsafe.Pointer
outer:
for {
// Select table.
idx := m.directoryIndex(hash)
t := m.directoryAt(idx)
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
// As we look for a match, keep track of the first deleted slot
// we find, which we'll use to insert the new entry if
// necessary.
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
match := g.ctrls().matchH2(h2(hash))
// Look for an existing slot containing this key.
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
if typ.NeedKeyUpdate() {
typedmemmove(typ.Key, slotKey, key)
}
slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
t.checkInvariants(typ, m)
break outer
}
match = match.removeFirst()
}
// No existing slot for this key in this group. Is this the end
// of the probe sequence?
match = g.ctrls().matchEmpty()
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
var i uintptr
// If we found a deleted slot along the way, we
// can replace it without consuming growthLeft.
if firstDeletedGroup.data != nil {
g = firstDeletedGroup
i = firstDeletedSlot
t.growthLeft++ // will be decremented below to become a no-op.
} else {
// Otherwise, use the empty slot.
i = match.first()
}
// If there is room left to grow, just insert the new entry.
if t.growthLeft > 0 {
slotKey := g.key(typ, i)
slotKeyOrig := slotKey
if typ.IndirectKey() {
kmem := newobject(typ.Key)
*(*unsafe.Pointer)(slotKey) = kmem
slotKey = kmem
}
typedmemmove(typ.Key, slotKey, key)
slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
if typ.IndirectElem() {
emem := newobject(typ.Elem)
*(*unsafe.Pointer)(slotElem) = emem
slotElem = emem
}
g.ctrls().set(i, ctrl(h2(hash)))
t.growthLeft--
t.used++
m.used++
t.checkInvariants(typ, m)
break outer
}
t.rehash(typ, m)
continue outer
}
// No empty slots in this group. Check for a deleted
// slot, which we'll use if we don't find a match later
// in the probe sequence.
//
// We only need to remember a single deleted slot.
if firstDeletedGroup.data == nil {
// Since we already checked for empty slots
// above, matches here must be deleted slots.
match = g.ctrls().matchEmptyOrDeleted()
if match != 0 {
firstDeletedGroup = g
firstDeletedSlot = match.first()
}
}
}
}
if m.writing == 0 {
fatal("concurrent map writes")
}
m.writing ^= 1
return slotElem
}

@@ -12,7 +12,6 @@ import (
"go/token"
"internal/asan"
"internal/goarch"
"internal/goexperiment"
"internal/msan"
"internal/race"
"internal/testenv"
@@ -1277,10 +1276,6 @@ var deepEqualPerfTests = []struct {
}
func TestDeepEqualAllocs(t *testing.T) {
// TODO(prattmic): maps on stack
if goexperiment.SwissMap {
t.Skipf("Maps on stack not yet implemented")
}
if asan.Enabled {
t.Skip("test allocates more with -asan; see #70079")
}
@@ -7343,7 +7338,8 @@ func TestGCBits(t *testing.T) {
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
testGCBitsMap(t)
// For maps, we don't manually construct GC data, instead using the
// public reflect API in groupAndSlotOf.
}
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }

@@ -1,24 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package reflect
import (
"unsafe"
)
func MapBucketOf(x, y Type) Type {
return toType(bucketOf(x.common(), y.common()))
}
func CachedBucketOf(m Type) Type {
t := m.(*rtype)
if Kind(t.t.Kind()) != Map {
panic("not map")
}
tt := (*mapType)(unsafe.Pointer(t))
return toType(tt.Bucket)
}

@@ -1,12 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package reflect
func MapGroupOf(x, y Type) Type {
grp, _ := groupAndSlotOf(x, y)
return grp
}

@@ -152,3 +152,8 @@ var MethodValueCallCodePtr = methodValueCallCodePtr
var InternalIsZero = isZero
var IsRegularMemory = isRegularMemory
func MapGroupOf(x, y Type) Type {
grp, _ := groupAndSlotOf(x, y)
return grp
}

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package reflect
import (
@@ -14,16 +12,11 @@ import (
"unsafe"
)
// mapType represents a map type.
//
// TODO(prattmic): Only used within this file, could be cleaned up.
type mapType = abi.SwissMapType
func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type " + t.String())
}
tt := (*mapType)(unsafe.Pointer(t))
tt := (*abi.SwissMapType)(unsafe.Pointer(t))
return toType(tt.Key)
}
@@ -50,7 +43,7 @@ func MapOf(key, elem Type) Type {
// Look in known types.
s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
mt := (*mapType)(unsafe.Pointer(tt))
mt := (*abi.SwissMapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
@@ -63,7 +56,7 @@ func MapOf(key, elem Type) Type {
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap))
mt := **(**abi.SwissMapType)(unsafe.Pointer(&imap))
mt.Str = resolveReflectName(newName(s, "", false, false))
mt.TFlag = abi.TFlagDirectIface
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
@@ -145,7 +138,7 @@ var stringType = rtypeOf("")
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
@@ -209,7 +202,7 @@ func mapIterNext(it *maps.Iter) {
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
fl := v.flag.ro() | flag(keyType.Kind())
@@ -248,10 +241,6 @@ type MapIter struct {
hiter maps.Iter
}
// TODO(prattmic): only for sharing the linkname declarations with old maps.
// Remove with old maps.
type hiter = maps.Iter
// Key returns the key of iter's current map entry.
func (iter *MapIter) Key() Value {
if !iter.hiter.Initialized() {
@@ -262,7 +251,7 @@ func (iter *MapIter) Key() Value {
panic("MapIter.Key called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
@@ -287,7 +276,7 @@ func (v Value) SetIterKey(iter *MapIter) {
target = v.ptr
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
@@ -306,7 +295,7 @@ func (iter *MapIter) Value() Value {
panic("MapIter.Value called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
@@ -331,7 +320,7 @@ func (v Value) SetIterValue(iter *MapIter) {
target = v.ptr
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
iter.m.mustBeExported() // do not let unexported m leak
@@ -348,7 +337,7 @@ func (iter *MapIter) Next() bool {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.Initialized() {
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
m := (*maps.Map)(iter.m.pointer())
mapIterStart(t, m, &iter.hiter)
} else {
@@ -408,7 +397,7 @@ func (v Value) SetMapIndex(key, elem Value) {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ()))
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
k := *(*string)(key.ptr)
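As a usage note for the reflect API whose internals change above: SetIterKey and SetIterValue copy the current entry into caller-provided Values, avoiding the new Value allocation that iter.Key() and iter.Value() would make on each entry. A minimal example (the map contents are arbitrary):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange()
	// Reusable destinations for the key and value of each entry.
	k := reflect.New(reflect.TypeOf("")).Elem()
	v := reflect.New(reflect.TypeOf(0)).Elem()
	for iter.Next() {
		k.SetIterKey(iter)
		v.SetIterValue(iter)
		fmt.Println(k.String(), v.Int())
	}
}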

@@ -1,484 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package reflect
import (
"internal/abi"
"internal/goarch"
"unsafe"
)
// mapType represents a map type.
type mapType struct {
abi.OldMapType
}
// Pushed from runtime.
//go:noescape
func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter)
//go:noescape
func mapiternext(it *hiter)
func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type " + t.String())
}
tt := (*mapType)(unsafe.Pointer(t))
return toType(tt.Key)
}
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
ktyp := key.common()
etyp := elem.common()
if ktyp.Equal == nil {
panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
}
// Look in cache.
ckey := cacheKey{Map, ktyp, etyp, 0}
if mt, ok := lookupCache.Load(ckey); ok {
return mt.(Type)
}
// Look in known types.
s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
mt := (*mapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
}
}
// Make a map type.
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap))
mt.Str = resolveReflectName(newName(s, "", false, false))
mt.TFlag = abi.TFlagDirectIface
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
mt.Key = ktyp
mt.Elem = etyp
mt.Bucket = bucketOf(ktyp, etyp)
mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
return typehash(ktyp, p, seed)
}
mt.Flags = 0
if ktyp.Size_ > abi.OldMapMaxKeyBytes {
mt.KeySize = uint8(goarch.PtrSize)
mt.Flags |= 1 // indirect key
} else {
mt.KeySize = uint8(ktyp.Size_)
}
if etyp.Size_ > abi.OldMapMaxElemBytes {
mt.ValueSize = uint8(goarch.PtrSize)
mt.Flags |= 2 // indirect value
} else {
mt.ValueSize = uint8(etyp.Size_)
}
mt.BucketSize = uint16(mt.Bucket.Size_)
if isReflexive(ktyp) {
mt.Flags |= 4
}
if needKeyUpdate(ktyp) {
mt.Flags |= 8
}
if hashMightPanic(ktyp) {
mt.Flags |= 16
}
mt.PtrToThis = 0
ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
return ti.(Type)
}
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
if ktyp.Size_ > abi.OldMapMaxKeyBytes {
ktyp = ptrTo(ktyp)
}
if etyp.Size_ > abi.OldMapMaxElemBytes {
etyp = ptrTo(etyp)
}
// Prepare GC data if any.
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
// or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
var gcdata *byte
var ptrdata uintptr
size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.Pointers() || etyp.Pointers() {
nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
// Runtime needs pointer masks to be a multiple of uintptr in size.
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
mask := make([]byte, n)
base := uintptr(abi.OldMapBucketCount / goarch.PtrSize)
if ktyp.Pointers() {
emitGCMask(mask, base, ktyp, abi.OldMapBucketCount)
}
base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize
if etyp.Pointers() {
emitGCMask(mask, base, etyp, abi.OldMapBucketCount)
}
base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
gcdata = &mask[0]
ptrdata = (word + 1) * goarch.PtrSize
// overflow word must be last
if ptrdata != size {
panic("reflect: bad layout computation in MapOf")
}
}
b := &abi.Type{
Align_: goarch.PtrSize,
Size_: size,
Kind_: abi.Struct,
PtrBytes: ptrdata,
GCData: gcdata,
}
s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
b.Str = resolveReflectName(newName(s, "", false, false))
return b
}
var stringType = rtypeOf("")
// MapIndex returns the value associated with key in the map v.
// It panics if v's Kind is not [Map].
// It returns the zero Value if key is not found in the map or if v represents a nil map.
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
// MapKeys as arguments to MapIndex. If either the map
// or the key is unexported, though, the result will be
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
var e unsafe.Pointer
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
k = key.ptr
} else {
k = unsafe.Pointer(&key.ptr)
}
e = mapaccess(v.typ(), v.pointer(), k)
}
if e == nil {
return Value{}
}
typ := tt.Elem
fl := (v.flag | key.flag).ro()
fl |= flag(typ.Kind())
return copyVal(typ, fl, e)
}
// MapKeys returns a slice containing all the keys present in the map,
// in unspecified order.
// It panics if v's Kind is not [Map].
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
fl := v.flag.ro() | flag(keyType.Kind())
m := v.pointer()
mlen := int(0)
if m != nil {
mlen = maplen(m)
}
var it hiter
mapiterinit(v.typ(), m, &it)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
key := it.key
if key == nil {
// Someone deleted an entry from the map since we
// called maplen above. It's a data race, but nothing
// we can do about it.
break
}
a[i] = copyVal(keyType, fl, key)
mapiternext(&it)
}
return a[:i]
}
// hiter's structure matches runtime.hiter's structure.
// Having a clone here allows us to embed a map iterator
// inside type MapIter so that MapIters can be re-used
// without doing any allocations.
type hiter struct {
key unsafe.Pointer
elem unsafe.Pointer
t unsafe.Pointer
h unsafe.Pointer
buckets unsafe.Pointer
bptr unsafe.Pointer
overflow *[]unsafe.Pointer
oldoverflow *[]unsafe.Pointer
startBucket uintptr
offset uint8
wrapped bool
B uint8
i uint8
bucket uintptr
checkBucket uintptr
clearSeq uint64
}
func (h *hiter) initialized() bool {
return h.t != nil
}
// A MapIter is an iterator for ranging over a map.
// See [Value.MapRange].
type MapIter struct {
m Value
hiter hiter
}
// Key returns the key of iter's current map entry.
func (iter *MapIter) Key() Value {
if !iter.hiter.initialized() {
panic("MapIter.Key called before Next")
}
iterkey := iter.hiter.key
if iterkey == nil {
panic("MapIter.Key called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
// SetIterKey assigns to v the key of iter's current map entry.
// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
// As in Go, the key must be assignable to v's type and
// must not be derived from an unexported field.
// It panics if [Value.CanSet] returns false.
func (v Value) SetIterKey(iter *MapIter) {
if !iter.hiter.initialized() {
panic("reflect: Value.SetIterKey called before Next")
}
iterkey := iter.hiter.key
if iterkey == nil {
panic("reflect: Value.SetIterKey called on exhausted iterator")
}
v.mustBeAssignable()
var target unsafe.Pointer
if v.kind() == Interface {
target = v.ptr
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
typedmemmove(v.typ(), v.ptr, key.ptr)
}
// Value returns the value of iter's current map entry.
func (iter *MapIter) Value() Value {
if !iter.hiter.initialized() {
panic("MapIter.Value called before Next")
}
iterelem := iter.hiter.elem
if iterelem == nil {
panic("MapIter.Value called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
// SetIterValue assigns to v the value of iter's current map entry.
// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
// As in Go, the value must be assignable to v's type and
// must not be derived from an unexported field.
// It panics if [Value.CanSet] returns false.
func (v Value) SetIterValue(iter *MapIter) {
if !iter.hiter.initialized() {
panic("reflect: Value.SetIterValue called before Next")
}
iterelem := iter.hiter.elem
if iterelem == nil {
panic("reflect: Value.SetIterValue called on exhausted iterator")
}
v.mustBeAssignable()
var target unsafe.Pointer
if v.kind() == Interface {
target = v.ptr
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
iter.m.mustBeExported() // do not let unexported m leak
elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
typedmemmove(v.typ(), v.ptr, elem.ptr)
}
// Next advances the map iterator and reports whether there is another
// entry. It returns false when iter is exhausted; subsequent
// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
func (iter *MapIter) Next() bool {
if !iter.m.IsValid() {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.initialized() {
mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
} else {
if iter.hiter.key == nil {
panic("MapIter.Next called on exhausted iterator")
}
mapiternext(&iter.hiter)
}
return iter.hiter.key != nil
}
// Reset modifies iter to iterate over v.
// It panics if v's Kind is not [Map] and v is not the zero Value.
// Reset(Value{}) causes iter not to refer to any map,
// which may allow the previously iterated-over map to be garbage collected.
func (iter *MapIter) Reset(v Value) {
if v.IsValid() {
v.mustBe(Map)
}
iter.m = v
iter.hiter = hiter{}
}
// MapRange returns a range iterator for a map.
// It panics if v's Kind is not [Map].
//
// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
// [MapIter.Next] returns false when the iterator is exhausted.
// MapRange follows the same iteration semantics as a range statement.
//
// Example:
//
// iter := reflect.ValueOf(m).MapRange()
// for iter.Next() {
// k := iter.Key()
// v := iter.Value()
// ...
// }
func (v Value) MapRange() *MapIter {
// This is inlinable to take advantage of "function outlining".
// The allocation of MapIter can be stack allocated if the caller
// does not allow it to escape.
// See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
if v.kind() != Map {
v.panicNotMap()
}
return &MapIter{m: v}
}
// SetMapIndex sets the element associated with key in the map v to elem.
// It panics if v's Kind is not [Map].
// If elem is the zero Value, SetMapIndex deletes the key from the map.
// Otherwise if v holds a nil map, SetMapIndex will panic.
// As in Go, key's elem must be assignable to the map's key type,
// and elem's value must be assignable to the map's elem type.
func (v Value) SetMapIndex(key, elem Value) {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ()))
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
return
}
elem.mustBeExported()
elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
var e unsafe.Pointer
if elem.flag&flagIndir != 0 {
e = elem.ptr
} else {
e = unsafe.Pointer(&elem.ptr)
}
mapassign_faststr(v.typ(), v.pointer(), k, e)
return
}
key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
k = key.ptr
} else {
k = unsafe.Pointer(&key.ptr)
}
if elem.typ() == nil {
mapdelete(v.typ(), v.pointer(), k)
return
}
elem.mustBeExported()
elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
var e unsafe.Pointer
if elem.flag&flagIndir != 0 {
e = elem.ptr
} else {
e = unsafe.Pointer(&elem.ptr)
}
mapassign(v.typ(), v.pointer(), k, e)
}
// Force slow panicking path not inlined, so it won't add to the
// inlining budget of the caller.
// TODO: undo when the inliner is no longer bottom-up only.
//
//go:noinline
func (f flag) panicNotMap() {
f.mustBe(Map)
}
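The MapRange and panicNotMap comments above describe a "function outlining" idiom: keep the common path small enough to inline so its result can stay on the caller's stack, and move the rare panicking path into a separate //go:noinline function so it does not count against the caller's inlining budget. A hypothetical sketch of the pattern (names invented for illustration):

package main

import "fmt"

type cursor struct{ pos int }

//go:noinline
func panicNotReady() {
	panic("cursor: not ready")
}

// newCursor is intentionally tiny so the compiler can inline it; callers
// that do not let the result escape can keep the cursor stack-allocated.
func newCursor(ready bool) *cursor {
	if !ready {
		panicNotReady()
	}
	return &cursor{}
}

func main() {
	c := newCursor(true)
	fmt.Println(c.pos)
}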

@@ -1,60 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package reflect_test
import (
"internal/abi"
"internal/goarch"
. "reflect"
"testing"
)
func testGCBitsMap(t *testing.T) {
const bucketCount = abi.OldMapBucketCount
hdr := make([]byte, bucketCount/goarch.PtrSize)
verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
verifyGCBits(t, MapBucketOf(k, e), want)
verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
}
verifyMapBucket(t,
Tscalar, Tptr,
map[Xscalar]Xptr(nil),
join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
verifyMapBucket(t,
Tscalarptr, Tptr,
map[Xscalarptr]Xptr(nil),
join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
verifyMapBucket(t, Tint64, Tptr,
map[int64]Xptr(nil),
join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
verifyMapBucket(t,
Tscalar, Tscalar,
map[Xscalar]Xscalar(nil),
empty)
verifyMapBucket(t,
ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
map[[2]Xscalarptr][3]Xptrscalar(nil),
join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
}

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package reflect_test
import (
@@ -11,11 +9,6 @@ import (
"testing"
)
func testGCBitsMap(t *testing.T) {
// Unlike old maps, we don't manually construct GC data for swiss maps,
// instead using the public reflect API in groupAndSlotOf.
}
// See also runtime_test.TestGroupSizeZero.
func TestGroupSizeZero(t *testing.T) {
st := reflect.TypeFor[struct{}]()

@@ -1,64 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package runtime
import (
"internal/abi"
"unsafe"
)
const RuntimeHmapSize = unsafe.Sizeof(hmap{})
func OverLoadFactor(count int, B uint8) bool {
return overLoadFactor(count, B)
}
func MapBucketsCount(m map[int]int) int {
h := *(**hmap)(unsafe.Pointer(&m))
return 1 << h.B
}
func MapBucketsPointerIsNil(m map[int]int) bool {
h := *(**hmap)(unsafe.Pointer(&m))
return h.buckets == nil
}
func MapTombstoneCheck(m map[int]int) {
// Make sure emptyOne and emptyRest are distributed correctly.
// We should have a series of filled and emptyOne cells, followed by
// a series of emptyRest cells.
h := *(**hmap)(unsafe.Pointer(&m))
i := any(m)
t := *(**maptype)(unsafe.Pointer(&i))
for x := 0; x < 1<<h.B; x++ {
b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
n := 0
for b := b0; b != nil; b = b.overflow(t) {
for i := 0; i < abi.OldMapBucketCount; i++ {
if b.tophash[i] != emptyRest {
n++
}
}
}
k := 0
for b := b0; b != nil; b = b.overflow(t) {
for i := 0; i < abi.OldMapBucketCount; i++ {
if k < n && b.tophash[i] == emptyRest {
panic("early emptyRest")
}
if k >= n && b.tophash[i] != emptyRest {
panic("late non-emptyRest")
}
if k == n-1 && b.tophash[i] == emptyOne {
panic("last non-emptyRest entry is emptyOne")
}
k++
}
}
}
}

@@ -1,11 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
func MapTombstoneCheck(m map[int]int) {
// TODO
}

@@ -59,9 +59,6 @@ const CrashStackImplemented = crashStackImplemented
const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames
var MapKeys = keys
var MapValues = values
var LockPartialOrder = lockPartialOrder
type TimeTimer = timeTimer

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
import (
@@ -16,8 +14,7 @@ import (
// Legacy //go:linkname compatibility shims
//
// The functions below are unused by the toolchain, and exist only for
// compatibility with existing //go:linkname use in the ecosystem (and in
// map_noswiss.go for normal use via GOEXPERIMENT=noswissmap).
// compatibility with existing //go:linkname use in the ecosystem.
// linknameIter is the it argument to mapiterinit and mapiternext.
//
@@ -27,7 +24,7 @@ import (
// type hiter struct {
// key unsafe.Pointer
// elem unsafe.Pointer
// t *maptype
// t *maptype // old map abi.Type
// h *hmap
// buckets unsafe.Pointer
// bptr *bmap

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
import (
@@ -19,8 +17,6 @@ const (
loadFactorDen = 8
)
type maptype = abi.SwissMapType
//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
var maps_errNilAssign error = plainError("assignment to entry in nil map")
@@ -331,19 +327,3 @@ func mapclone(m any) any {
e.data = (unsafe.Pointer)(map_)
return m
}
// keys for implementing maps.keys
//
//go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer) {
// Currently unused in the maps package.
panic("unimplemented")
}
// values for implementing maps.values
//
//go:linkname values maps.values
func values(m any, p unsafe.Pointer) {
// Currently unused in the maps package.
panic("unimplemented")
}

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
import (

@@ -1,493 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
}
}
}
return unsafe.Pointer(&zeroVal[0])
}
// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
}
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast32(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
insertb = b
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
inserti = i
insertb = b
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if insertb == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
*(*uint32)(insertk) = key
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
}
// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast32(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
insertb = b
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
inserti = i
insertb = b
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if insertb == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
*(*unsafe.Pointer)(insertk) = key
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
}
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast32(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
continue
}
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
if goarch.PtrSize == 4 && t.Key.Pointers() {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.OldMapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
} else {
if b.tophash[i+1] != emptyRest {
goto notLast
}
}
for {
b.tophash[i] = emptyRest
if i == 0 {
if b == bOrig {
break // beginning of initial bucket, we're done.
}
// Find previous bucket, continue at its last entry.
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = abi.OldMapBucketCount - 1
} else {
i--
}
if b.tophash[i] != emptyOne {
break
}
}
notLast:
h.count--
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
if h.count == 0 {
h.hash0 = uint32(rand())
}
break search
}
}
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
evacuate_fast32(t, h, bucket&h.oldbucketmask())
// evacuate one more oldbucket to make progress on growing
if h.growing() {
evacuate_fast32(t, h, h.nevacuate)
}
}
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)
// xy contains the x and y (low and high) evacuation destinations.
var xy [2]evacDst
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, abi.OldMapBucketCount*4)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, abi.OldMapBucketCount*4)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, abi.OldMapBucketCount*4)
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
continue
}
if top < minTopHash {
throw("bad map state")
}
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.Hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
}
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == abi.OldMapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, abi.OldMapBucketCount*4)
}
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
*(*uint32)(dst.k) = *(*uint32)(k)
}
typedmemmove(t.Elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 4)
dst.e = add(dst.e, uintptr(t.ValueSize))
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
n := uintptr(t.BucketSize) - dataOffset
memclrHasPointers(ptr, n)
}
}
if oldbucket == h.nevacuate {
advanceEvacuationMark(h, t, newbit)
}
}

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
import (

@@ -1,502 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
}
}
}
return unsafe.Pointer(&zeroVal[0])
}
// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
}
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast64(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
inserti = i
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
insertb = b
inserti = i
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if insertb == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
*(*uint64)(insertk) = key
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
}
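
The assign path above starts a grow once the element count would exceed the load factor or overflow buckets pile up. A minimal sketch of that load-factor check, assuming the old-map constants of 8 slots per bucket and a 13/2 (6.5) load factor; the helper below is a simplified stand-in, not the runtime's implementation.

package main

import "fmt"

const (
	bucketCnt     = 8  // slots per bucket in the old layout
	loadFactorNum = 13 // 13/2 = 6.5 entries per bucket on average
	loadFactorDen = 2
)

// overLoadFactor reports whether count entries spread over 1<<B buckets
// exceed the 6.5-entries-per-bucket threshold.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
}

func main() {
	// With B=2 there are 4 buckets, so growth is triggered by the 27th entry.
	fmt.Println(overLoadFactor(26, 2)) // false
	fmt.Println(overLoadFactor(27, 2)) // true
}
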
// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast64(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
inserti = i
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
insertb = b
inserti = i
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if insertb == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
*(*unsafe.Pointer)(insertk) = key
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
}
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_fast64(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
continue
}
// Only clear key if there are pointers in it.
if t.Key.Pointers() {
if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
// Just call memclrHasPointers instead of trying to handle all cases here.
memclrHasPointers(k, 8)
}
}
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.OldMapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
} else {
if b.tophash[i+1] != emptyRest {
goto notLast
}
}
for {
b.tophash[i] = emptyRest
if i == 0 {
if b == bOrig {
break // beginning of initial bucket, we're done.
}
// Find previous bucket, continue at its last entry.
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = abi.OldMapBucketCount - 1
} else {
i--
}
if b.tophash[i] != emptyOne {
break
}
}
notLast:
h.count--
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
if h.count == 0 {
h.hash0 = uint32(rand())
}
break search
}
}
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
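
The tail of the delete above turns a trailing run of emptyOne slots into emptyRest so that later probes can stop early. A simplified single-bucket sketch of that idea, ignoring the backtracking across overflow buckets; the constants and helper name are illustrative, not the runtime's.

package main

import "fmt"

const (
	emptyRest = 0 // this slot and everything after it in the bucket is empty
	emptyOne  = 1 // this slot is empty, but later slots may not be
	used      = 2 // stand-in for a real tophash value
)

// compactTrailingEmpties is called after slot i has just been marked emptyOne.
// If nothing after i is in use, it walks backwards converting emptyOne marks
// into emptyRest.
func compactTrailingEmpties(tophash []uint8, i int) {
	if i != len(tophash)-1 && tophash[i+1] != emptyRest {
		return // a later slot is still in use; leave the emptyOne marks alone
	}
	for i >= 0 && tophash[i] == emptyOne {
		tophash[i] = emptyRest
		i--
	}
}

func main() {
	th := []uint8{used, emptyOne, emptyOne, emptyRest}
	compactTrailingEmpties(th, 2) // slot 2 was just deleted
	fmt.Println(th)               // [2 0 0 0]
}
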
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
evacuate_fast64(t, h, bucket&h.oldbucketmask())
// evacuate one more oldbucket to make progress on growing
if h.growing() {
evacuate_fast64(t, h, h.nevacuate)
}
}
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)
// xy contains the x and y (low and high) evacuation destinations.
var xy [2]evacDst
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, abi.OldMapBucketCount*8)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, abi.OldMapBucketCount*8)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, abi.OldMapBucketCount*8)
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
continue
}
if top < minTopHash {
throw("bad map state")
}
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.Hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
}
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == abi.OldMapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, abi.OldMapBucketCount*8)
}
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if t.Key.Pointers() && writeBarrier.enabled {
if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
// Give up and call typedmemmove.
typedmemmove(t.Key, dst.k, k)
}
} else {
*(*uint64)(dst.k) = *(*uint64)(k)
}
typedmemmove(t.Elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 8)
dst.e = add(dst.e, uintptr(t.ValueSize))
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
n := uintptr(t.BucketSize) - dataOffset
memclrHasPointers(ptr, n)
}
}
if oldbucket == h.nevacuate {
advanceEvacuationMark(h, t, newbit)
}
}
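
A minimal sketch of the x/y split performed by the evacuation above: when the table doubles, each entry either keeps its old bucket index ("x") or moves up by the old bucket count ("y"), depending on the newly significant hash bit. The hash values below are arbitrary stand-ins.

package main

import "fmt"

// destBucket picks the destination bucket for an entry during a doubling grow.
// newbit is the old bucket count, i.e. the bit that becomes significant.
func destBucket(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // "y": upper half of the doubled table
	}
	return oldbucket // "x": same index as before
}

func main() {
	const newbit = 4 // doubling from 4 to 8 buckets
	for _, h := range []uintptr{0x3, 0x7, 0x1b} {
		fmt.Printf("hash %#x: old bucket %d -> new bucket %d\n", h, h&3, destBucket(h, h&3, newbit))
	}
}
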

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime
import (

@ -1,507 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
break
}
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(abi.OldMapBucketCount)
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
break
}
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
continue
}
// check last 4 bytes
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != abi.OldMapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != abi.OldMapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
dohash:
hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
}
return unsafe.Pointer(&zeroVal[0])
}
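
The long-key path above avoids full string comparisons by filtering on length and on the first and last four bytes, remembering at most one candidate and falling back to a hashed lookup when two keys survive the filter. A simplified, bucket-free sketch of that heuristic; a real caller would still confirm the returned candidate with a full comparison, and keys are assumed to be at least four bytes long.

package main

import "fmt"

// quickMatchCandidate scans keys for a plausible match of want using cheap
// checks only. It returns the index of the single surviving candidate, or
// needHash=true when more than one key survives and a hash lookup is needed.
func quickMatchCandidate(keys []string, want string) (idx int, needHash bool) {
	idx = -1
	for i, k := range keys {
		if len(k) != len(want) {
			continue
		}
		if k[:4] != want[:4] || k[len(k)-4:] != want[len(want)-4:] {
			continue
		}
		if idx >= 0 {
			return -1, true // two plausible keys: distinguish them by hash instead
		}
		idx = i
	}
	return idx, false
}

func main() {
	keys := []string{"alpha-0000000000000000000000000000-omega", "beta-0000"}
	i, needHash := quickMatchCandidate(keys, "alpha-0000000000000000000000000000-omega")
	fmt.Println(i, needHash) // 0 false
}
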
// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
break
}
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(abi.OldMapBucketCount)
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
break
}
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
continue
}
// check last 4 bytes
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != abi.OldMapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != abi.OldMapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
key := stringStructOf(&s)
hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_faststr(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
top := tophash(hash)
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && insertb == nil {
insertb = b
inserti = i
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
if k.len != key.len {
continue
}
if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
continue
}
// already have a mapping for key. Update it.
inserti = i
insertb = b
// Overwrite existing key, so it can be garbage collected.
// The size is already guaranteed to be set correctly.
k.str = key.str
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if insertb == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
*((*stringStruct)(insertk)) = *key
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
}
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
key := stringStructOf(&ky)
hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if h.growing() {
growWork_faststr(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
bOrig := b
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
continue
}
// Clear key's pointer.
k.str = nil
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.OldMapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
} else {
if b.tophash[i+1] != emptyRest {
goto notLast
}
}
for {
b.tophash[i] = emptyRest
if i == 0 {
if b == bOrig {
break // beginning of initial bucket, we're done.
}
// Find previous bucket, continue at its last entry.
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = abi.OldMapBucketCount - 1
} else {
i--
}
if b.tophash[i] != emptyOne {
break
}
}
notLast:
h.count--
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
if h.count == 0 {
h.hash0 = uint32(rand())
}
break search
}
}
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
evacuate_faststr(t, h, bucket&h.oldbucketmask())
// evacuate one more oldbucket to make progress on growing
if h.growing() {
evacuate_faststr(t, h, h.nevacuate)
}
}
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)
// xy contains the x and y (low and high) evacuation destinations.
var xy [2]evacDst
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
continue
}
if top < minTopHash {
throw("bad map state")
}
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
hash := t.Hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
}
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == abi.OldMapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
}
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
*(*string)(dst.k) = *(*string)(k)
typedmemmove(t.Elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 2*goarch.PtrSize)
dst.e = add(dst.e, uintptr(t.ValueSize))
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
n := uintptr(t.BucketSize) - dataOffset
memclrHasPointers(ptr, n)
}
}
if oldbucket == h.nevacuate {
advanceEvacuationMark(h, t, newbit)
}
}

File diff suppressed because it is too large

@ -1,214 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package runtime_test
import (
"internal/abi"
"internal/goarch"
"runtime"
"slices"
"testing"
)
func TestHmapSize(t *testing.T) {
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/reflectdata/map.go and must be in sync.
// The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms.
var hmapSize = uintptr(2*8 + 5*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}
}
func TestLoadFactor(t *testing.T) {
for b := uint8(0); b < 20; b++ {
count := 13 * (1 << b) / 2 // 6.5
if b == 0 {
count = 8
}
if runtime.OverLoadFactor(count, b) {
t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
}
if !runtime.OverLoadFactor(count+1, b) {
t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
}
}
}
func TestMapIterOrder(t *testing.T) {
sizes := []int{3, 7, 9, 15}
if abi.OldMapBucketCountBits >= 5 {
// it gets flaky (often only one iteration order) at size 3 when abi.OldMapBucketCountBits >= 5.
t.Fatalf("This test becomes flaky if abi.OldMapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits)
}
for _, n := range sizes {
for i := 0; i < 1000; i++ {
// Make m be {0: true, 1: true, ..., n-1: true}.
m := make(map[int]bool)
for i := 0; i < n; i++ {
m[i] = true
}
// Check that iterating over the map produces at least two different orderings.
ord := func() []int {
var s []int
for key := range m {
s = append(s, key)
}
return s
}
first := ord()
ok := false
for try := 0; try < 100; try++ {
if !slices.Equal(first, ord()) {
ok = true
break
}
}
if !ok {
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
break
}
}
}
}
const bs = abi.OldMapBucketCount
// belowOverflow should be a pretty-full pair of buckets;
// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
// that are 13/16 full each, which is the overflow boundary.
// Adding one to that should ensure overflow to the next higher size.
const (
belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
)
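
A quick numeric check of the constants above, assuming bs = 8 as in the old bucket layout: belowOverflow is 12 (two buckets at 75%), atOverflow is 13, and one more entry crosses the 6.5-entries-per-bucket boundary of a two-bucket (B=1) table, which is what the table of expected bucket counts below encodes.

package main

import "fmt"

func main() {
	const bs = 8
	belowOverflow := bs * 3 / 2        // 12: two buckets at 75% fill
	atOverflow := belowOverflow + bs/8 // 13: two buckets at 13/16 fill
	limit := 13 * (1 << 1) / 2         // 13: max entries for B=1 before growing
	fmt.Println(belowOverflow, atOverflow, atOverflow+1 > limit) // 12 13 true
}
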
var mapBucketTests = [...]struct {
n int // n is the number of map elements
noescape int // number of expected buckets for non-escaping map
escape int // number of expected buckets for escaping map
}{
{-(1 << 30), 1, 1},
{-1, 1, 1},
{0, 1, 1},
{1, 1, 1},
{bs, 1, 1},
{bs + 1, 2, 2},
{belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
{atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
{2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
{2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
{4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
{4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
}
func TestMapBuckets(t *testing.T) {
// Test that maps of different sizes have the right number of buckets.
// Non-escaping maps with small buckets (like map[int]int) never
// have a nil bucket pointer due to starting with preallocated buckets
// on the stack. Escaping maps start with a non-nil bucket pointer if
// hint size is above bucketCnt and thereby have more than one bucket.
// These tests depend on bucketCnt and loadFactor* in map.go.
t.Run("mapliteral", func(t *testing.T) {
for _, tt := range mapBucketTests {
localMap := map[int]int{}
if runtime.MapBucketsPointerIsNil(localMap) {
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
}
for i := 0; i < tt.n; i++ {
localMap[i] = i
}
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
}
escapingMap := runtime.Escape(map[int]int{})
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
}
for i := 0; i < tt.n; i++ {
escapingMap[i] = i
}
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
}
}
})
t.Run("nohint", func(t *testing.T) {
for _, tt := range mapBucketTests {
localMap := make(map[int]int)
if runtime.MapBucketsPointerIsNil(localMap) {
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
}
for i := 0; i < tt.n; i++ {
localMap[i] = i
}
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
}
escapingMap := runtime.Escape(make(map[int]int))
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
}
for i := 0; i < tt.n; i++ {
escapingMap[i] = i
}
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
}
}
})
t.Run("makemap", func(t *testing.T) {
for _, tt := range mapBucketTests {
localMap := make(map[int]int, tt.n)
if runtime.MapBucketsPointerIsNil(localMap) {
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
}
for i := 0; i < tt.n; i++ {
localMap[i] = i
}
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
}
escapingMap := runtime.Escape(make(map[int]int, tt.n))
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
}
for i := 0; i < tt.n; i++ {
escapingMap[i] = i
}
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
}
}
})
t.Run("makemap64", func(t *testing.T) {
for _, tt := range mapBucketTests {
localMap := make(map[int]int, int64(tt.n))
if runtime.MapBucketsPointerIsNil(localMap) {
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
}
for i := 0; i < tt.n; i++ {
localMap[i] = i
}
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
}
escapingMap := runtime.Escape(make(map[int]int, tt.n))
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
}
for i := 0; i < tt.n; i++ {
escapingMap[i] = i
}
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
}
}
})
}

@ -1,75 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package runtime_test
import (
"internal/abi"
"internal/goarch"
"internal/runtime/maps"
"slices"
"testing"
"unsafe"
)
func TestHmapSize(t *testing.T) {
// The structure of Map is defined in internal/runtime/maps/map.go
// and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync.
// The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
wantSize := uintptr(2*8 + 4*goarch.PtrSize)
gotSize := unsafe.Sizeof(maps.Map{})
if gotSize != wantSize {
t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
}
}
// See also reflect_test.TestGroupSizeZero.
func TestGroupSizeZero(t *testing.T) {
var m map[struct{}]struct{}
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
// internal/runtime/maps creates pointers to slots, even if slots
// are size 0. The compiler should have reserved an extra word to
// ensure that pointers to the zero-size type at the end of group are
// valid.
if mt.Group.Size() <= 8 {
t.Errorf("Group size got %d want >8", mt.Group.Size())
}
}
func TestMapIterOrder(t *testing.T) {
sizes := []int{3, 7, 9, 15}
for _, n := range sizes {
for i := 0; i < 1000; i++ {
// Make m be {0: true, 1: true, ..., n-1: true}.
m := make(map[int]bool)
for i := 0; i < n; i++ {
m[i] = true
}
// Check that iterating over the map produces at least two different orderings.
ord := func() []int {
var s []int
for key := range m {
s = append(s, key)
}
return s
}
first := ord()
ok := false
for try := 0; try < 100; try++ {
if !slices.Equal(first, ord()) {
ok = true
break
}
}
if !ok {
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
break
}
}
}
}

@ -6,7 +6,9 @@ package runtime_test
import (
"fmt"
"internal/goexperiment"
"internal/abi"
"internal/goarch"
"internal/runtime/maps"
"internal/testenv"
"math"
"os"
@ -812,31 +814,6 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) {
}
}
func TestMapTombstones(t *testing.T) {
m := map[int]int{}
const N = 10000
// Fill a map.
for i := 0; i < N; i++ {
m[i] = i
}
runtime.MapTombstoneCheck(m)
// Delete half of the entries.
for i := 0; i < N; i += 2 {
delete(m, i)
}
runtime.MapTombstoneCheck(m)
// Add new entries to fill in holes.
for i := N; i < 3*N/2; i++ {
m[i] = i
}
runtime.MapTombstoneCheck(m)
// Delete everything.
for i := 0; i < 3*N/2; i++ {
delete(m, i)
}
runtime.MapTombstoneCheck(m)
}
type canString int
func (c canString) String() string {
@ -1060,44 +1037,6 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) {
})
}
func TestMapKeys(t *testing.T) {
if goexperiment.SwissMap {
t.Skip("mapkeys not implemented for swissmaps")
}
type key struct {
s string
pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
}
m := map[key]int{{s: "a"}: 1, {s: "b"}: 2}
keys := make([]key, 0, len(m))
runtime.MapKeys(m, unsafe.Pointer(&keys))
for _, k := range keys {
if len(k.s) != 1 {
t.Errorf("len(k.s) == %d, want 1", len(k.s))
}
}
}
func TestMapValues(t *testing.T) {
if goexperiment.SwissMap {
t.Skip("mapvalues not implemented for swissmaps")
}
type val struct {
s string
pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
}
m := map[int]val{1: {s: "a"}, 2: {s: "b"}}
vals := make([]val, 0, len(m))
runtime.MapValues(m, unsafe.Pointer(&vals))
for _, v := range vals {
if len(v.s) != 1 {
t.Errorf("len(v.s) == %d, want 1", len(v.s))
}
}
}
func computeHash() uintptr {
var v struct{}
return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v))
@ -1202,3 +1141,62 @@ func TestMapIterDeleteReplace(t *testing.T) {
})
}
}
func TestHmapSize(t *testing.T) {
// The structure of Map is defined in internal/runtime/maps/map.go
// and in cmd/compile/internal/reflectdata/map.go and must be in sync.
// The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
wantSize := uintptr(2*8 + 4*goarch.PtrSize)
gotSize := unsafe.Sizeof(maps.Map{})
if gotSize != wantSize {
t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
}
}
// See also reflect_test.TestGroupSizeZero.
func TestGroupSizeZero(t *testing.T) {
var m map[struct{}]struct{}
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
// internal/runtime/maps creates pointers to slots, even if slots
// are size 0. The compiler should have reserved an extra word to
// ensure that pointers to the zero-size type at the end of group are
// valid.
if mt.Group.Size() <= 8 {
t.Errorf("Group size got %d want >8", mt.Group.Size())
}
}
func TestMapIterOrder(t *testing.T) {
sizes := []int{3, 7, 9, 15}
for _, n := range sizes {
for i := 0; i < 1000; i++ {
// Make m be {0: true, 1: true, ..., n-1: true}.
m := make(map[int]bool)
for i := 0; i < n; i++ {
m[i] = true
}
// Check that iterating over the map produces at least two different orderings.
ord := func() []int {
var s []int
for key := range m {
s = append(s, key)
}
return s
}
first := ord()
ok := false
for try := 0; try < 100; try++ {
if !slices.Equal(first, ord()) {
ok = true
break
}
}
if !ok {
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
break
}
}
}
}

@ -160,13 +160,6 @@ class MapTypePrinter:
return str(self.val.type)
def children(self):
fields = [f.name for f in self.val.type.strip_typedefs().target().fields()]
if 'buckets' in fields:
yield from self.old_map_children()
else:
yield from self.swiss_map_children()
def swiss_map_children(self):
SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots
cnt = 0
@ -270,40 +263,6 @@ class MapTypePrinter:
yield from group_slots(group)
def old_map_children(self):
MapBucketCount = 8 # see internal/abi:OldMapBucketCount
B = self.val['B']
buckets = self.val['buckets']
oldbuckets = self.val['oldbuckets']
flags = self.val['flags']
inttype = self.val['hash0'].type
cnt = 0
for bucket in xrange(2 ** int(B)):
bp = buckets + bucket
if oldbuckets:
oldbucket = bucket & (2 ** (B - 1) - 1)
oldbp = oldbuckets + oldbucket
oldb = oldbp.dereference()
if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
if bucket >= 2 ** (B - 1):
continue # already did old bucket
bp = oldbp
while bp:
b = bp.dereference()
for i in xrange(MapBucketCount):
if b['tophash'][i] != 0:
k = b['keys'][i]
v = b['values'][i]
if flags & 1:
k = k.dereference()
if flags & 2:
v = v.dereference()
yield str(cnt), k
yield str(cnt + 1), v
cnt += 2
bp = b['overflow']
class ChanTypePrinter:
"""Pretty print chan[T] types.

@ -8,8 +8,6 @@ import (
"bytes"
"flag"
"fmt"
"internal/abi"
"internal/goexperiment"
"internal/testenv"
"os"
"os/exec"
@ -155,9 +153,6 @@ func checkPtraceScope(t *testing.T) {
}
}
// NOTE: the maps below are allocated larger than abi.MapBucketCount
// to ensure that they are not "optimized out".
var helloSource = `
import "fmt"
import "runtime"
@ -166,8 +161,10 @@ var gslice []string
var smallmapvar map[string]string
func main() {
smallmapvar = make(map[string]string)
mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `)
slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `)
// NOTE: the maps below are allocated large to ensure that they are not
// "optimized out".
mapvar := make(map[string]string, 10)
slicemap := make(map[string][]string, 10)
chanint := make(chan int, 10)
chanstr := make(chan string, 10)
chanint <- 99
@ -638,20 +635,10 @@ func TestGdbAutotmpTypes(t *testing.T) {
types := []string{
"[]main.astruct",
"main.astruct",
}
if goexperiment.SwissMap {
types = append(types, []string{
"groupReference<string,main.astruct>",
"table<string,main.astruct>",
"map<string,main.astruct>",
"map<string,main.astruct> * map[string]main.astruct",
}...)
} else {
types = append(types, []string{
"bucket<string,main.astruct>",
"hash<string,main.astruct>",
"hash<string,main.astruct> * map[string]main.astruct",
}...)
}
for _, name := range types {
if !strings.Contains(sgot, name) {

@ -9,7 +9,6 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"unsafe"
)
@ -605,14 +604,9 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
}
return true
case abi.Map:
if goexperiment.SwissMap {
mt := (*abi.SwissMapType)(unsafe.Pointer(t))
mv := (*abi.SwissMapType)(unsafe.Pointer(v))
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
}
mt := (*abi.OldMapType)(unsafe.Pointer(t))
mv := (*abi.OldMapType)(unsafe.Pointer(v))
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
case abi.Pointer:
pt := (*ptrtype)(unsafe.Pointer(t))
pv := (*ptrtype)(unsafe.Pointer(v))

@ -1,59 +0,0 @@
// run
//go:build !goexperiment.swissmap
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"maps"
_ "unsafe"
)
func main() {
for i := 0; i < 100; i++ {
f()
}
}
const NB = 4
func f() {
// Make a map with NB buckets, at max capacity.
// 6.5 entries/bucket.
ne := NB * 13 / 2
m := map[int]int{}
for i := 0; i < ne; i++ {
m[i] = i
}
// delete/insert a lot, to hopefully get lots of overflow buckets
// and trigger a same-size grow.
ssg := false
for i := ne; i < ne+1000; i++ {
delete(m, i-ne)
m[i] = i
if sameSizeGrow(m) {
ssg = true
break
}
}
if !ssg {
return
}
// Insert 1 more entry, which would ordinarily trigger a growth.
// We can't grow while growing, so we instead go over our
// target capacity.
m[-1] = -1
// Cloning in this state will make a map with a destination bucket
// array twice the size of the source.
_ = maps.Clone(m)
}
//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test
func sameSizeGrow(m map[int]int) bool

@ -1,4 +1,4 @@
// run -goexperiment noswissmap
// run
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style

@ -492,7 +492,7 @@ func f28(b bool) {
func f29(b bool) {
if b {
for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$"
printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
}
}
@ -695,7 +695,7 @@ func newT40() *T40 {
func good40() {
ret := T40{} // ERROR "stack object ret T40$"
ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
t := &ret
printnl() // ERROR "live at call to printnl: ret$"
// Note: ret is live at the printnl because the compiler moves &ret

@ -27,14 +27,14 @@ func newT40() *T40 {
}
func bad40() {
t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
printnl() // ERROR "live at call to printnl: ret$"
useT40(t)
}
func good40() {
ret := T40{} // ERROR "stack object ret T40$"
ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
t := &ret
printnl() // ERROR "live at call to printnl: ret$"
useT40(t)

@ -490,7 +490,7 @@ func f28(b bool) {
func f29(b bool) {
if b {
for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$"
printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
}
}
@ -693,7 +693,7 @@ func newT40() *T40 {
func good40() {
ret := T40{} // ERROR "stack object ret T40$"
ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
t := &ret
printnl() // ERROR "live at call to printnl: ret$"
// Note: ret is live at the printnl because the compiler moves &ret