all: remove redundant Swiss prefixes

Now that there is only one map implementation, we can simplify names.

For #54766.

Change-Id: I6a6a636cc6a8fc5e7712c27782fc0ced7467b939
Reviewed-on: https://go-review.googlesource.com/c/go/+/691596
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Michael Pratt 2025-07-29 13:58:35 -04:00 committed by Gopher Robot
parent 2ae059ccaf
commit 82a1921c3b
37 changed files with 286 additions and 286 deletions
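
The rename is mechanical: every map identifier loses its Swiss prefix. Representative renames, drawn from the hunks below:

- abi.SwissMapType -> abi.MapType
- abi.SwissMapGroupSlots / SwissMapGroupSlotsBits -> abi.MapGroupSlots / abi.MapGroupSlotsBits
- abi.SwissMapMaxKeyBytes / SwissMapMaxElemBytes -> abi.MapMaxKeyBytes / abi.MapMaxElemBytes
- abi.SwissMapCtrlEmpty -> abi.MapCtrlEmpty
- abi.SwissMapNeedKeyUpdate, SwissMapHashMightPanic, SwissMapIndirectKey, SwissMapIndirectElem -> the same names without Swiss
- types.Map.SwissGroup -> types.Map.Group
- reflectdata.SwissMapType / SwissMapGroupType / SwissMapIterType -> MapType / MapGroupType / MapIterType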

View file

@ -15,10 +15,10 @@ import (
"internal/abi"
)
// SwissMapGroupType makes the map slot group type given the type of the map.
func SwissMapGroupType(t *types.Type) *types.Type {
if t.MapType().SwissGroup != nil {
return t.MapType().SwissGroup
// MapGroupType makes the map slot group type given the type of the map.
func MapGroupType(t *types.Type) *types.Type {
if t.MapType().Group != nil {
return t.MapType().Group
}
// Builds a type representing a group structure for the given map type.
@ -29,7 +29,7 @@ func SwissMapGroupType(t *types.Type) *types.Type {
//
// type group struct {
// ctrl uint64
// slots [abi.SwissMapGroupSlots]struct {
// slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
@ -39,10 +39,10 @@ func SwissMapGroupType(t *types.Type) *types.Type {
elemtype := t.Elem()
types.CalcSize(keytype)
types.CalcSize(elemtype)
if keytype.Size() > abi.SwissMapMaxKeyBytes {
if keytype.Size() > abi.MapMaxKeyBytes {
keytype = types.NewPtr(keytype)
}
if elemtype.Size() > abi.SwissMapMaxElemBytes {
if elemtype.Size() > abi.MapMaxElemBytes {
elemtype = types.NewPtr(elemtype)
}
@ -53,7 +53,7 @@ func SwissMapGroupType(t *types.Type) *types.Type {
slot := types.NewStruct(slotFields)
slot.SetNoalg(true)
slotArr := types.NewArray(slot, abi.SwissMapGroupSlots)
slotArr := types.NewArray(slot, abi.MapGroupSlots)
slotArr.SetNoalg(true)
fields := []*types.Field{
@ -76,25 +76,25 @@ func SwissMapGroupType(t *types.Type) *types.Type {
// the end to ensure pointers are valid.
base.Fatalf("bad group size for %v", t)
}
if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() {
base.Fatalf("key indirect incorrect for %v", t)
}
if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() {
base.Fatalf("elem indirect incorrect for %v", t)
}
t.MapType().SwissGroup = group
t.MapType().Group = group
group.StructType().Map = t
return group
}
var cachedSwissTableType *types.Type
var cachedMapTableType *types.Type
// swissTableType returns a type interchangeable with internal/runtime/maps.table.
// mapTableType returns a type interchangeable with internal/runtime/maps.table.
// Make sure this stays in sync with internal/runtime/maps/table.go.
func swissTableType() *types.Type {
if cachedSwissTableType != nil {
return cachedSwissTableType
func mapTableType() *types.Type {
if cachedMapTableType != nil {
return cachedMapTableType
}
// type table struct {
@ -135,17 +135,17 @@ func swissTableType() *types.Type {
base.Fatalf("internal/runtime/maps.table size not correct: got %d, want %d", table.Size(), size)
}
cachedSwissTableType = table
cachedMapTableType = table
return table
}
var cachedSwissMapType *types.Type
var cachedMapType *types.Type
// SwissMapType returns a type interchangeable with internal/runtime/maps.Map.
// MapType returns a type interchangeable with internal/runtime/maps.Map.
// Make sure this stays in sync with internal/runtime/maps/map.go.
func SwissMapType() *types.Type {
if cachedSwissMapType != nil {
return cachedSwissMapType
func MapType() *types.Type {
if cachedMapType != nil {
return cachedMapType
}
// type Map struct {
@ -191,23 +191,23 @@ func SwissMapType() *types.Type {
base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size)
}
cachedSwissMapType = m
cachedMapType = m
return m
}
var cachedSwissIterType *types.Type
var cachedMapIterType *types.Type
// SwissMapIterType returns a type interchangeable with runtime.hiter.
// Make sure this stays in sync with runtime/map.go.
func SwissMapIterType() *types.Type {
if cachedSwissIterType != nil {
return cachedSwissIterType
// MapIterType returns a type interchangeable with internal/runtime/maps.Iter.
// Make sure this stays in sync with internal/runtime/maps/table.go.
func MapIterType() *types.Type {
if cachedMapIterType != nil {
return cachedMapIterType
}
// type Iter struct {
// key unsafe.Pointer // *Key
// elem unsafe.Pointer // *Elem
// typ unsafe.Pointer // *SwissMapType
// typ unsafe.Pointer // *MapType
// m *Map
//
// groupSlotOffset uint64
@ -231,13 +231,13 @@ func SwissMapIterType() *types.Type {
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("typ", types.Types[types.TUNSAFEPTR]),
makefield("m", types.NewPtr(SwissMapType())),
makefield("m", types.NewPtr(MapType())),
makefield("groupSlotOffset", types.Types[types.TUINT64]),
makefield("dirOffset", types.Types[types.TUINT64]),
makefield("clearSeq", types.Types[types.TUINT64]),
makefield("globalDepth", types.Types[types.TUINT8]),
makefield("dirIdx", types.Types[types.TINT]),
makefield("tab", types.NewPtr(swissTableType())),
makefield("tab", types.NewPtr(mapTableType())),
makefield("group", types.Types[types.TUNSAFEPTR]),
makefield("entryIdx", types.Types[types.TUINT64]),
}
@ -257,13 +257,13 @@ func SwissMapIterType() *types.Type {
base.Fatalf("internal/runtime/maps.Iter size not correct: got %d, want %d", iter.Size(), size)
}
cachedSwissIterType = iter
cachedMapIterType = iter
return iter
}
func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
// internal/abi.SwissMapType
gtyp := SwissMapGroupType(t)
func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
// internal/abi.MapType
gtyp := MapGroupType(t)
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(gtyp)
@ -287,16 +287,16 @@ func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
if needkeyupdate(t.Key()) {
flags |= abi.SwissMapNeedKeyUpdate
flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(t.Key()) {
flags |= abi.SwissMapHashMightPanic
flags |= abi.MapHashMightPanic
}
if t.Key().Size() > abi.SwissMapMaxKeyBytes {
flags |= abi.SwissMapIndirectKey
if t.Key().Size() > abi.MapMaxKeyBytes {
flags |= abi.MapIndirectKey
}
if t.Elem().Size() > abi.SwissMapMaxKeyBytes {
flags |= abi.SwissMapIndirectElem
if t.Elem().Size() > abi.MapMaxKeyBytes {
flags |= abi.MapIndirectElem
}
c.Field("Flags").WriteUint32(flags)

View file

@ -872,7 +872,7 @@ func writeType(t *types.Type) *obj.LSym {
}
case types.TMAP:
writeSwissMapType(t, lsym, c)
writeMapType(t, lsym, c)
case types.TPTR:
// internal/abi.PtrType

View file

@ -54,7 +54,7 @@ func Init() {
ChanType = FromReflect(reflect.TypeOf(abi.ChanType{}))
FuncType = FromReflect(reflect.TypeOf(abi.FuncType{}))
InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{}))
MapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
MapType = FromReflect(reflect.TypeOf(abi.MapType{}))
PtrType = FromReflect(reflect.TypeOf(abi.PtrType{}))
SliceType = FromReflect(reflect.TypeOf(abi.SliceType{}))
StructType = FromReflect(reflect.TypeOf(abi.StructType{}))

View file

@ -1508,7 +1508,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
// No PSIGNB, simply do byte equality with ctrlEmpty.
// Load ctrlEmpty into each byte of a control word.
var ctrlsEmpty uint64 = abi.SwissMapCtrlEmpty
var ctrlsEmpty uint64 = abi.MapCtrlEmpty
e := s.constInt64(types.Types[types.TUINT64], int64(ctrlsEmpty))
// Explicit copy to fp register. See
// https://go.dev/issue/70451.
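
For context on this intrinsic: abi.MapCtrlEmpty is the empty control byte (0x80) broadcast to all eight bytes of a word (bitsetLSB * ctrlEmpty = 0x8080808080808080), and the match is byte-wise equality against that word. A scalar sketch of the standard SWAR byte-equality test, an illustration rather than the runtime's exact code:

    package main

    import "fmt"

    const (
        bitsetLSB = 0x0101010101010101
        bitsetMSB = 0x8080808080808080
        ctrlEmpty = 0x80
    )

    // matchByte sets the high bit of each byte of w that equals b: the
    // classic SWAR zero-byte test applied to w XOR broadcast(b). It can
    // yield a false positive in the byte above a true match, which map
    // probing tolerates because keys are re-checked after a match.
    func matchByte(w uint64, b uint8) uint64 {
        x := w ^ (bitsetLSB * uint64(b))
        return (x - bitsetLSB) &^ x & bitsetMSB
    }

    func main() {
        ctrls := uint64(bitsetLSB * ctrlEmpty)           // abi.MapCtrlEmpty
        fmt.Printf("%#x\n", matchByte(ctrls, ctrlEmpty)) // 0x8080808080808080: all 8 slots empty
    }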

View file

@ -94,7 +94,7 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
_ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
_ = types.NewPtr(reflectdata.MapType()) // *internal/runtime/maps.Map
_ = types.NewPtr(deferstruct()) // *runtime._defer
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
@ -3080,7 +3080,7 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
}
// map <--> *internal/runtime/maps.Map
mt := types.NewPtr(reflectdata.SwissMapType())
mt := types.NewPtr(reflectdata.MapType())
if to.Kind() == types.TMAP && from == mt {
return v
}
@ -5752,7 +5752,7 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
case ir.OLEN:
if n.X.Type().IsMap() {
// length is stored in the first word, but needs conversion to int.
loadType := reflectdata.SwissMapType().Field(0).Type // uint64
loadType := reflectdata.MapType().Field(0).Type // uint64
load := s.load(loadType, x)
s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node
} else {
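
The used count is the first field of internal/runtime/maps.Map, which is why OLEN lowers to a plain load plus an integer conversion. For illustration only (this leans on unexported runtime layout and is not a stable API), the same word can be read by hand:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3}
        // A map value is a *maps.Map; its first field is the uint64
        // used count that len() loads. Layout-dependent illustration.
        used := **(**uint64)(unsafe.Pointer(&m))
        fmt.Println(used, len(m)) // 3 3
    }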

View file

@ -474,7 +474,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
// Format the bucket struct for map[x]y as map.group[x]y.
// This avoids a recursive print that generates very long names.
switch t {
case mt.SwissGroup:
case mt.Group:
b.WriteString("map.group[")
default:
base.Fatalf("unknown internal map type")

View file

@ -280,7 +280,7 @@ type Map struct {
Key *Type // Key type
Elem *Type // Val (elem) type
SwissGroup *Type // internal struct type representing a slot group
Group *Type // internal struct type representing a slot group
}
// MapType returns t's extra map-specific fields.

View file

@ -313,7 +313,7 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
mapType := reflectdata.SwissMapType()
mapType := reflectdata.MapType()
hint := n.Len
// var m *Map
@ -326,28 +326,28 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
m = stackTempAddr(init, mapType)
// Allocate one group pointed to by m.dirPtr on stack if hint
// is not larger than SwissMapGroupSlots. In case hint is
// is not larger than MapGroupSlots. In case hint is
// larger, runtime.makemap will allocate on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !ir.IsConst(hint, constant.Int) ||
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
// In case hint is larger than SwissMapGroupSlots
// In case hint is larger than MapGroupSlots
// runtime.makemap will allocate on the heap, see
// #20184
//
// if hint <= abi.SwissMapGroupSlots {
// if hint <= abi.MapGroupSlots {
// var gv group
// g = &gv
// g.ctrl = abi.SwissMapCtrlEmpty
// g.ctrl = abi.MapCtrlEmpty
// m.dirPtr = g
// }
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapGroupSlots)), nil, nil)
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.MapGroupSlots)), nil, nil)
nif.Likely = true
groupType := reflectdata.SwissMapGroupType(t)
groupType := reflectdata.MapGroupType(t)
// var gv group
// g = &gv
@ -355,9 +355,9 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// Can't use ir.NewInt because bit 63 is set, which
// makes conversion to uint64 upset.
empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))
empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.MapCtrlEmpty))
// g.ctrl = abi.SwissMapCtrlEmpty
// g.ctrl = abi.MapCtrlEmpty
csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go
ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
nif.Body.Append(ca)
@ -370,12 +370,12 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
}
}
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= abi.SwissMapGroupSlots
// make(map[any]any, hint) where hint <= abi.MapGroupSlots
// specially allows for faster map initialization and
// improves binary size by using calls with fewer arguments.
// For hint <= abi.SwissMapGroupSlots no groups will be
// For hint <= abi.MapGroupSlots no groups will be
// allocated by makemap. Therefore, no groups need to be
// allocated in this code path.
if n.Esc() == ir.EscNone {
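
At the source level, the special cases in this hunk mean, for example:

    m1 := make(map[int]int)      // no groups allocated by makemap
    m2 := make(map[int]int, 8)   // constant hint <= abi.MapGroupSlots: same, and the
                                 // group can live on the stack if m2 does not escape
    m3 := make(map[int]int, 100) // hint > abi.MapGroupSlots: makemap allocates on the heap
    _, _, _ = m1, m2, m3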

View file

@ -966,8 +966,8 @@ func (o *orderState) stmt(n ir.Node) {
n.X = o.copyExpr(r)
// n.Prealloc is the temp for the iterator.
// SwissMapIterType contains pointers and needs to be zeroed.
n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
// MapIterType contains pointers and needs to be zeroed.
n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
}
n.Key = o.exprInPlace(n.Key)
n.Value = o.exprInPlace(n.Value)

View file

@ -246,7 +246,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
hit := nrange.Prealloc
th := hit.Type()
// depends on layout of iterator struct.
// See cmd/compile/internal/reflectdata/map.go:SwissMapIterType
// See cmd/compile/internal/reflectdata/map.go:MapIterType
keysym := th.Field(0).Sym
elemsym := th.Field(1).Sym // ditto
iterInit := "mapIterStart"

View file

@ -191,7 +191,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
if t.Elem().Size() > abi.SwissMapMaxElemBytes {
if t.Elem().Size() > abi.MapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
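
The effect of mapfast at call sites, assuming AlgType classifies these keys as 32-bit, 64-bit, and string respectively:

    var a map[int32]bool   // assignments lower to runtime.mapassign_fast32
    var b map[int64][]byte // runtime.mapassign_fast64
    var c map[string]int   // runtime.mapassign_faststr

    type big [129]byte     // larger than abi.MapMaxElemBytes (128)
    var d map[int64]big    // mapslow: generic runtime.mapassign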

View file

@ -560,7 +560,7 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
case abi.Chan: // reflect.chanType
off += 2 * arch.PtrSize
case abi.Map:
off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
off += 7*arch.PtrSize + 4 // internal/abi.MapType
if arch.PtrSize == 8 {
off += 4 // padding for final uint32 field (Flags).
}
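
The skip follows from abi.MapType's layout (the struct renamed in internal/abi below): after the embedded Type header come four pointer-sized fields, then three uintptrs, then the uint32 Flags, which 64-bit targets pad out to the struct's 8-byte alignment.

    // abi.MapType after its embedded Type (see internal/abi/map.go below):
    //   Key, Elem, Group *Type                       // 3 * PtrSize
    //   Hasher func(unsafe.Pointer, uintptr) uintptr // 1 * PtrSize
    //   GroupSize, SlotSize, ElemOff uintptr         // 3 * PtrSize
    //   Flags uint32                                 // 4 bytes (+4 padding on 64-bit)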

View file

@ -158,7 +158,7 @@ func decodetypeMapValue(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) l
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x20 / 0x38
}
func decodetypeMapSwissGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
func decodetypeMapGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
relocs := ldr.Relocs(symIdx)
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+2*int32(arch.PtrSize)) // 0x24 / 0x40
}

View file

@ -884,7 +884,7 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
keyType := decodetypeMapKey(d.ldr, d.arch, gotype)
valType := decodetypeMapValue(d.ldr, d.arch, gotype)
groupType := decodetypeMapSwissGroup(d.ldr, d.arch, gotype)
groupType := decodetypeMapGroup(d.ldr, d.arch, gotype)
keyType = d.walksymtypedef(d.defgotype(keyType))
valType = d.walksymtypedef(d.defgotype(valType))
@ -1922,10 +1922,10 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.ArrayType",
"type:internal/abi.ChanType",
"type:internal/abi.FuncType",
"type:internal/abi.MapType",
"type:internal/abi.PtrType",
"type:internal/abi.SliceType",
"type:internal/abi.StructType",
"type:internal/abi.SwissMapType",
"type:internal/abi.InterfaceType",
"type:internal/abi.ITab",
"type:internal/abi.Imethod"} {

View file

@ -60,10 +60,10 @@ func TestRuntimeTypesPresent(t *testing.T) {
"internal/abi.ArrayType": true,
"internal/abi.ChanType": true,
"internal/abi.FuncType": true,
"internal/abi.MapType": true,
"internal/abi.PtrType": true,
"internal/abi.SliceType": true,
"internal/abi.StructType": true,
"internal/abi.SwissMapType": true,
"internal/abi.InterfaceType": true,
"internal/abi.ITab": true,
}

View file

@ -50,7 +50,7 @@ func comparableHash[T comparable](v T, seed Seed) uint64 {
s := seed.s
var m map[T]struct{}
mTyp := abi.TypeOf(m)
hasher := (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
hasher := (*abi.MapType)(unsafe.Pointer(mTyp)).Hasher
if goarch.PtrSize == 8 {
return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)))
}
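
comparableHash is the engine behind the exported maphash.Comparable; typical use:

    package main

    import (
        "fmt"
        "hash/maphash"
    )

    type point struct{ X, Y int }

    func main() {
        seed := maphash.MakeSeed()
        // Comparable hashes any comparable value with the runtime's own
        // map hasher for that type (the Hasher field fetched above).
        a := maphash.Comparable(seed, point{1, 2})
        b := maphash.Comparable(seed, point{1, 2})
        fmt.Println(a == b) // true: same value, same seed => same hash
    }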

View file

@ -12,24 +12,24 @@ import (
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Number of bits in the group.slot count.
SwissMapGroupSlotsBits = 3
MapGroupSlotsBits = 3
// Number of slots in a group.
SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8
MapGroupSlots = 1 << MapGroupSlotsBits // 8
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
SwissMapMaxKeyBytes = 128
SwissMapMaxElemBytes = 128
MapMaxKeyBytes = 128
MapMaxElemBytes = 128
ctrlEmpty = 0b10000000
bitsetLSB = 0x0101010101010101
// Value of control word with all empty slots.
SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
MapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
)
type SwissMapType struct {
type MapType struct {
Type
Key *Type
Elem *Type
@ -44,21 +44,21 @@ type SwissMapType struct {
// Flag values
const (
SwissMapNeedKeyUpdate = 1 << iota
SwissMapHashMightPanic
SwissMapIndirectKey
SwissMapIndirectElem
MapNeedKeyUpdate = 1 << iota
MapHashMightPanic
MapIndirectKey
MapIndirectElem
)
func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&SwissMapNeedKeyUpdate != 0
func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&MapNeedKeyUpdate != 0
}
func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&SwissMapHashMightPanic != 0
func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&MapHashMightPanic != 0
}
func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&SwissMapIndirectKey != 0
func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&MapIndirectKey != 0
}
func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&SwissMapIndirectElem != 0
func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&MapIndirectElem != 0
}
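
The iota block expands to MapNeedKeyUpdate = 1, MapHashMightPanic = 2, MapIndirectKey = 4, MapIndirectElem = 8. A self-contained sketch of how the bits combine (constants copied here, since internal/abi is not importable):

    package main

    import "fmt"

    const (
        MapNeedKeyUpdate = 1 << iota // 1
        MapHashMightPanic            // 2
        MapIndirectKey               // 4
        MapIndirectElem              // 8
    )

    func main() {
        // A map whose >128-byte key type is hashed through an interface
        // (hash can panic) carries both bits:
        flags := uint32(MapHashMightPanic | MapIndirectKey)
        fmt.Println(flags, flags&MapIndirectKey != 0) // 6 true
    }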

View file

@ -355,7 +355,7 @@ func (t *Type) Uncommon() *UncommonType {
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
SwissMapType
MapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u
@ -384,7 +384,7 @@ func (t *Type) Elem() *Type {
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
tt := (*SwissMapType)(unsafe.Pointer(t))
tt := (*MapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))
@ -404,12 +404,12 @@ func (t *Type) StructType() *StructType {
return (*StructType)(unsafe.Pointer(t))
}
// MapType returns t cast to a *SwissMapType, or nil if its tag does not match.
func (t *Type) MapType() *SwissMapType {
// MapType returns t cast to a *MapType, or nil if its tag does not match.
func (t *Type) MapType() *MapType {
if t.Kind() != Map {
return nil
}
return (*SwissMapType)(unsafe.Pointer(t))
return (*MapType)(unsafe.Pointer(t))
}
// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@ -471,7 +471,7 @@ func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
func (t *Type) Key() *Type {
if t.Kind() == Map {
return (*SwissMapType)(unsafe.Pointer(t)).Key
return (*MapType)(unsafe.Pointer(t)).Key
}
return nil
}

View file

@ -22,14 +22,14 @@ const MaxAvgGroupLoad = maxAvgGroupLoad
// we can't properly test hint alloc overflows with this.
const maxAllocTest = 1 << 30
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
func newTestMapType[K comparable, V any]() *abi.MapType {
var m map[K]V
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
mt := (*abi.MapType)(unsafe.Pointer(mTyp))
return mt
}
func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.MapType) {
mt := newTestMapType[K, V]()
return NewMap(mt, hint, nil, maxAllocTest), mt
}
@ -68,7 +68,7 @@ func (m *Map) GroupCount() uint64 {
// Returns nil if there are no full groups.
// Returns nil if a group is full but contains entirely deleted slots.
// Returns nil if the map is small.
func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
func (m *Map) KeyFromFullGroup(typ *abi.MapType) unsafe.Pointer {
if m.dirLen <= 0 {
return nil
}
@ -89,7 +89,7 @@ func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
}
// All full or deleted slots.
for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if g.ctrls().get(j) == ctrlDeleted {
continue
}
@ -106,7 +106,7 @@ func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
}
// Returns nil if the map is small.
func (m *Map) TableFor(typ *abi.SwissMapType, key unsafe.Pointer) *table {
func (m *Map) TableFor(typ *abi.MapType, key unsafe.Pointer) *table {
if m.dirLen <= 0 {
return nil
}
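
A minimal test built on these helpers, mirroring the style of map_test.go (package maps_test; imports internal/runtime/maps, testing, and unsafe):

    func TestPutGet(t *testing.T) {
        m, typ := maps.NewTestMap[int64, int64](8)

        key, elem := int64(1), int64(42)
        m.Put(typ, unsafe.Pointer(&key), unsafe.Pointer(&elem))

        got, ok := m.Get(typ, unsafe.Pointer(&key))
        if !ok || *(*int64)(got) != elem {
            t.Errorf("Get(%d) = %v, %v; want %d, true", key, got, ok, elem)
        }
    }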

View file

@ -122,7 +122,7 @@ func (b bitset) count() int {
// TODO(prattmic): Consider inverting the top bit so that the zero value is empty.
type ctrl uint8
// ctrlGroup is a fixed size array of abi.SwissMapGroupSlots control bytes
// ctrlGroup is a fixed size array of abi.MapGroupSlots control bytes
// stored in a uint64.
type ctrlGroup uint64
@ -233,7 +233,7 @@ func ctrlGroupMatchFull(g ctrlGroup) bitset {
// groupReference is a wrapper type representing a single slot group stored at
// data.
//
// A group holds abi.SwissMapGroupSlots slots (key/elem pairs) plus their
// A group holds abi.MapGroupSlots slots (key/elem pairs) plus their
// control word.
type groupReference struct {
// data points to the group, which is described by typ.Group and has
@ -241,7 +241,7 @@ type groupReference struct {
//
// type group struct {
// ctrls ctrlGroup
// slots [abi.SwissMapGroupSlots]slot
// slots [abi.MapGroupSlots]slot
// }
//
// type slot struct {
@ -281,14 +281,14 @@ func (g *groupReference) ctrls() *ctrlGroup {
}
// key returns a pointer to the key at index i.
func (g *groupReference) key(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
func (g *groupReference) key(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize
return unsafe.Pointer(uintptr(g.data) + offset)
}
// elem returns a pointer to the element at index i.
func (g *groupReference) elem(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
func (g *groupReference) elem(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff
return unsafe.Pointer(uintptr(g.data) + offset)
@ -310,7 +310,7 @@ type groupsReference struct {
// newGroups allocates a new array of length groups.
//
// Length must be a power of two.
func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
func newGroups(typ *abi.MapType, length uint64) groupsReference {
return groupsReference{
// TODO: make the length type the same throughout.
data: newarray(typ.Group, int(length)),
@ -319,7 +319,7 @@ func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
}
// group returns the group at index i.
func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference {
func (g *groupsReference) group(typ *abi.MapType, i uint64) groupReference {
// TODO(prattmic): Do something here about truncation on cast to
// uintptr on 32-bit systems?
offset := uintptr(i) * typ.GroupSize
@ -329,11 +329,11 @@ func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference
}
}
func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) {
func cloneGroup(typ *abi.MapType, newGroup, oldGroup groupReference) {
typedmemmove(typ.Group, newGroup.data, oldGroup.data)
if typ.IndirectKey() {
// Deep copy keys if indirect.
for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldKey := *(*unsafe.Pointer)(oldGroup.key(typ, i))
if oldKey == nil {
continue
@ -345,7 +345,7 @@ func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) {
}
if typ.IndirectElem() {
// Deep copy elems if indirect.
for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldElem := *(*unsafe.Pointer)(oldGroup.elem(typ, i))
if oldElem == nil {
continue
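
key and elem above are plain offset arithmetic within the group allocation. Specialized to the map[int64]string group sketched earlier (64-bit; values illustrative):

    // Slot i of a group: key at data+8+i*24, elem 8 bytes past the key.
    func slotAddrs(groupData unsafe.Pointer, i uintptr) (key, elem unsafe.Pointer) {
        const (
            groupSlotsOffset = 8  // skip the uint64 ctrl word
            slotSize         = 24 // typ.SlotSize: int64 key + string header
            elemOff          = 8  // typ.ElemOff: elem follows the key
        )
        key = unsafe.Pointer(uintptr(groupData) + groupSlotsOffset + i*slotSize)
        elem = unsafe.Pointer(uintptr(key) + elemOff)
        return key, elem
    }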

View file

@ -21,7 +21,7 @@ import (
//
// Terminology:
// - Slot: A storage location of a single key/element pair.
// - Group: A group of abi.SwissMapGroupSlots (8) slots, plus a control word.
// - Group: A group of abi.MapGroupSlots (8) slots, plus a control word.
// - Control word: An 8-byte word which denotes whether each slot is empty,
// deleted, or used. If a slot is used, its control byte also contains the
// lower 7 bits of the hash (H2).
@ -191,7 +191,7 @@ func h2(h uintptr) uintptr {
return h & 0x7f
}
// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:SwissMapType.
// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:MapType.
type Map struct {
// The number of filled slots (i.e. the number of elements in all
// tables). Excludes deleted slots.
@ -212,7 +212,7 @@ type Map struct {
// details.
//
// Small map optimization: if the map always contained
// abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
// abi.MapGroupSlots or fewer entries, it fits entirely in a
// single group. In that case dirPtr points directly to a single group.
//
// dirPtr *group
@ -257,14 +257,14 @@ func depthToShift(depth uint8) uint8 {
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
func NewMap(mt *abi.MapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
if m == nil {
m = new(Map)
}
m.seed = uintptr(rand())
if hint <= abi.SwissMapGroupSlots {
if hint <= abi.MapGroupSlots {
// A small map can fill all 8 slots, so no need to increase
// target capacity.
//
@ -286,7 +286,7 @@ func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
// Set initial capacity to hold hint entries without growing in the
// average case.
targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad
targetCapacity := (hint * abi.MapGroupSlots) / maxAvgGroupLoad
if targetCapacity < hint { // overflow
return m // return an empty map.
}
@ -396,11 +396,11 @@ func (m *Map) Used() uint64 {
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
func (m *Map) Get(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
return m.getWithoutKey(typ, key)
}
func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
func (m *Map) getWithKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, nil, false
}
@ -419,7 +419,7 @@ func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Poin
return m.directoryAt(idx).getWithKey(typ, hash, key)
}
func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
func (m *Map) getWithoutKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, false
}
@ -439,7 +439,7 @@ func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.P
return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}
func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
func (m *Map) getWithKeySmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
g := groupReference{
data: m.dirPtr,
}
@ -470,7 +470,7 @@ func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Po
return nil, nil, false
}
func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
func (m *Map) Put(typ *abi.MapType, key, elem unsafe.Pointer) {
slotElem := m.PutSlot(typ, key)
typedmemmove(typ.Elem, slotElem, elem)
}
@ -479,7 +479,7 @@ func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
// should be written.
//
// PutSlot never returns nil.
func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
func (m *Map) PutSlot(typ *abi.MapType, key unsafe.Pointer) unsafe.Pointer {
if m.writing != 0 {
fatal("concurrent map writes")
}
@ -495,7 +495,7 @@ func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
@ -529,7 +529,7 @@ func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer
}
}
func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
func (m *Map) putSlotSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -591,7 +591,7 @@ func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Point
return slotElem
}
func (m *Map) growToSmall(typ *abi.SwissMapType) {
func (m *Map) growToSmall(typ *abi.MapType) {
grp := newGroups(typ, 1)
m.dirPtr = grp.data
@ -601,14 +601,14 @@ func (m *Map) growToSmall(typ *abi.SwissMapType) {
g.ctrls().setEmpty()
}
func (m *Map) growToTable(typ *abi.SwissMapType) {
tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0)
func (m *Map) growToTable(typ *abi.MapType) {
tab := newTable(typ, 2*abi.MapGroupSlots, 0, 0)
g := groupReference{
data: m.dirPtr,
}
for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
for i := uintptr(0); i < abi.MapGroupSlots; i++ {
if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
// Empty
continue
@ -640,7 +640,7 @@ func (m *Map) growToTable(typ *abi.SwissMapType) {
m.globalShift = depthToShift(m.globalDepth)
}
func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
func (m *Map) Delete(typ *abi.MapType, key unsafe.Pointer) {
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
@ -680,7 +680,7 @@ func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
m.writing ^= 1
}
func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
func (m *Map) deleteSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) {
g := groupReference{
data: m.dirPtr,
}
@ -728,7 +728,7 @@ func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointe
}
// Clear deletes all entries from the map resulting in an empty map.
func (m *Map) Clear(typ *abi.SwissMapType) {
func (m *Map) Clear(typ *abi.MapType) {
if m == nil || m.Used() == 0 && !m.tombstonePossible {
return
}
@ -766,7 +766,7 @@ func (m *Map) Clear(typ *abi.SwissMapType) {
m.writing ^= 1
}
func (m *Map) clearSmall(typ *abi.SwissMapType) {
func (m *Map) clearSmall(typ *abi.MapType) {
g := groupReference{
data: m.dirPtr,
}
@ -777,7 +777,7 @@ func (m *Map) clearSmall(typ *abi.SwissMapType) {
m.used = 0
}
func (m *Map) Clone(typ *abi.SwissMapType) *Map {
func (m *Map) Clone(typ *abi.MapType) *Map {
// Note: this should never be called with a nil map.
if m.writing != 0 {
fatal("concurrent map clone and map write")
@ -814,7 +814,7 @@ func (m *Map) Clone(typ *abi.SwissMapType) *Map {
return m
}
func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error {
func mapKeyError(t *abi.MapType, p unsafe.Pointer) error {
if !t.HashMightPanic() {
return nil
}
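
NewMap's sizing arithmetic, worked through with this file's constants (maxAvgGroupLoad is 7, i.e. groups are kept at most 7/8 full on average):

    const (
        groupSlots      = 8 // abi.MapGroupSlots
        maxAvgGroupLoad = 7
    )

    hint := uintptr(100)
    targetCapacity := (hint * groupSlots) / maxAvgGroupLoad // 114 slots
    // The table layer rounds this up to a power of two: 128 slots, or
    // 16 groups. A hint <= abi.MapGroupSlots skips all of this and
    // stays in single-group small-map mode with no allocation.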

View file

@ -15,8 +15,8 @@ import (
func TestCtrlSize(t *testing.T) {
cs := unsafe.Sizeof(maps.CtrlGroup(0))
if cs != abi.SwissMapGroupSlots {
t.Errorf("ctrlGroup size got %d want abi.SwissMapGroupSlots %d", cs, abi.SwissMapGroupSlots)
if cs != abi.MapGroupSlots {
t.Errorf("ctrlGroup size got %d want abi.MapGroupSlots %d", cs, abi.MapGroupSlots)
}
}
@ -630,7 +630,7 @@ func TestMapZeroSizeSlot(t *testing.T) {
}
func TestMapIndirect(t *testing.T) {
type big [abi.SwissMapMaxKeyBytes + abi.SwissMapMaxElemBytes]byte
type big [abi.MapMaxKeyBytes + abi.MapMaxElemBytes]byte
m, typ := maps.NewTestMap[big, big](8)
@ -711,8 +711,8 @@ func escape[T any](x T) T {
}
const (
belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
belowMax = abi.MapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
atMax = (2 * abi.MapGroupSlots * maps.MaxAvgGroupLoad) / abi.MapGroupSlots // 2 groups at 7/8 full.
)
func TestTableGroupCount(t *testing.T) {
@ -767,7 +767,7 @@ func TestTableGroupCount(t *testing.T) {
},
},
{
n: abi.SwissMapGroupSlots,
n: abi.MapGroupSlots,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
@ -775,7 +775,7 @@ func TestTableGroupCount(t *testing.T) {
},
},
{
n: abi.SwissMapGroupSlots + 1,
n: abi.MapGroupSlots + 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},

View file

@ -53,7 +53,7 @@ var zeroVal [abi.ZeroValSize]byte
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
@ -127,7 +127,7 @@ func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsaf
}
//go:linkname runtime_mapaccess2 runtime.mapaccess2
func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
@ -201,7 +201,7 @@ func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsa
}
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -232,7 +232,7 @@ func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
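
These linknamed wrappers are what ordinary map syntax compiles into (string keys shown; mapfast picks the faststr variants):

    m := make(map[string]int)
    v1 := m["k"]     // runtime.mapaccess1_faststr
    v2, ok := m["k"] // runtime.mapaccess2_faststr
    m["k"] = 1       // runtime.mapassign_faststr
    delete(m, "k")   // runtime.mapdelete_faststr
    _, _, _ = v1, v2, ok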

View file

@ -12,7 +12,7 @@ import (
)
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
@ -81,7 +81,7 @@ func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe
}
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) {
func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
@ -149,7 +149,7 @@ func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsaf
}
}
func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer {
func (m *Map) putSlotSmallFast32(typ *abi.MapType, hash uintptr, key uint32) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -190,7 +190,7 @@ func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32
}
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
func runtime_mapassign_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -215,7 +215,7 @@ func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast32(typ, hash, key)
if m.writing == 0 {
@ -329,7 +329,7 @@ outer:
// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
func runtime_mapassign_fast32ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -354,7 +354,7 @@ func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Point
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
@ -458,7 +458,7 @@ outer:
}
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) {
func runtime_mapdelete_fast32(typ *abi.MapType, m *Map, key uint32) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)

View file

@ -12,7 +12,7 @@ import (
)
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
@ -81,7 +81,7 @@ func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe
}
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) {
func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
@ -149,7 +149,7 @@ func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsaf
}
}
func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer {
func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -190,7 +190,7 @@ func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64
}
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -215,7 +215,7 @@ func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast64(typ, hash, key)
if m.writing == 0 {
@ -324,7 +324,7 @@ outer:
return slotElem
}
func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -367,7 +367,7 @@ func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsaf
// Key is a 64-bit pointer (only called on 64-bit GOARCH).
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -392,7 +392,7 @@ func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Point
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
@ -497,7 +497,7 @@ outer:
}
//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) {
func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)

View file

@ -12,7 +12,7 @@ import (
"unsafe"
)
func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer {
func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -27,10 +27,10 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsaf
// for strings that are long enough that hashing is expensive.
if len(key) > 64 {
// String hashing and equality might be expensive. Do a quick check first.
j := abi.SwissMapGroupSlots
for i := range abi.SwissMapGroupSlots {
j := abi.MapGroupSlots
for i := range abi.MapGroupSlots {
if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
if j < abi.SwissMapGroupSlots {
if j < abi.MapGroupSlots {
// 2 strings both passed the quick equality test.
// Break out of this loop and do it the slow way.
goto dohash
@ -40,7 +40,7 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsaf
slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
ctrls >>= 8
}
if j == abi.SwissMapGroupSlots {
if j == abi.MapGroupSlots {
// No slot passed the quick test.
return nil
}
@ -59,7 +59,7 @@ dohash:
ctrls = *g.ctrls()
slotKey = g.key(typ, 0)
for range abi.SwissMapGroupSlots {
for range abi.MapGroupSlots {
if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
}
@ -98,7 +98,7 @@ func stringPtr(s string) unsafe.Pointer {
}
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
@ -157,7 +157,7 @@ func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsaf
}
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
@ -215,7 +215,7 @@ func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsa
}
}
func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@ -258,7 +258,7 @@ func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key strin
}
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@ -283,7 +283,7 @@ func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastStr(typ, hash, key)
if m.writing == 0 {
@ -396,7 +396,7 @@ outer:
}
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
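
longStringQuickEqualityTest itself is outside this hunk. A hypothetical sketch of such a pre-hash filter (the shipped implementation may differ): equal lengths plus matching bytes at both ends, which rejects most unequal long keys without paying for a full hash.

    // quickEqualityTest never returns false for equal strings and
    // rarely returns true for unequal ones. Callers guarantee
    // len(a) > 64, so the 8-byte slices below are in bounds.
    func quickEqualityTest(a, b string) bool {
        if len(a) != len(b) {
            return false
        }
        return a[:8] == b[:8] && a[len(a)-8:] == b[len(b)-8:]
    }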

View file

@ -36,7 +36,7 @@ type table struct {
used uint16
// The total number of slots (always 2^N). Equal to
// `(groups.lengthMask+1)*abi.SwissMapGroupSlots`.
// `(groups.lengthMask+1)*abi.MapGroupSlots`.
capacity uint16
// The number of slots we can still fill without needing to rehash.
@ -59,7 +59,7 @@ type table struct {
// directory).
index int
// groups is an array of slot groups. Each group holds abi.SwissMapGroupSlots
// groups is an array of slot groups. Each group holds abi.MapGroupSlots
// key/elem slots and their control bytes. A table has a fixed size
// groups array. The table is replaced (in rehash) when more space is
// required.
@ -71,9 +71,9 @@ type table struct {
groups groupsReference
}
func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint8) *table {
if capacity < abi.SwissMapGroupSlots {
capacity = abi.SwissMapGroupSlots
func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
if capacity < abi.MapGroupSlots {
capacity = abi.MapGroupSlots
}
t := &table{
@ -99,8 +99,8 @@ func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint
// reset resets the table with new, empty groups with the specified new total
// capacity.
func (t *table) reset(typ *abi.SwissMapType, capacity uint16) {
groupCount := uint64(capacity) / abi.SwissMapGroupSlots
func (t *table) reset(typ *abi.MapType, capacity uint16) {
groupCount := uint64(capacity) / abi.MapGroupSlots
t.groups = newGroups(typ, groupCount)
t.capacity = capacity
t.growthLeft = t.maxGrowthLeft()
@ -118,7 +118,7 @@ func (t *table) maxGrowthLeft() uint16 {
// No real reason to support zero capacity table, since an
// empty Map simply won't have a table.
panic("table must have positive capacity")
} else if t.capacity <= abi.SwissMapGroupSlots {
} else if t.capacity <= abi.MapGroupSlots {
// If the map fits in a single group then we're able to fill all of
// the slots except 1 (an empty slot is needed to terminate find
// operations).
@ -131,7 +131,7 @@ func (t *table) maxGrowthLeft() uint16 {
// TODO(prattmic): Do something cleaner.
panic("overflow")
}
return (t.capacity * maxAvgGroupLoad) / abi.SwissMapGroupSlots
return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
}
}
@ -142,7 +142,7 @@ func (t *table) Used() uint64 {
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
func (t *table) Get(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
// TODO(prattmic): We could avoid hashing in a variety of special
// cases.
//
@ -163,7 +163,7 @@ func (t *table) Get(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.P
// expose updated elements. For NeedsKeyUpdate keys, iteration also must return
// the new key value, not the old key value.
// hash must be the hash of the key.
func (t *table) getWithKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
// To find the location of a key in the table, we compute hash(key). From
// h1(hash(key)) and the capacity, we construct a probeSeq that visits
// every group of slots in some interesting order. See [probeSeq].
@ -223,7 +223,7 @@ func (t *table) getWithKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Point
}
}
func (t *table) getWithoutKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
@ -263,7 +263,7 @@ func (t *table) getWithoutKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Po
// the new table.
//
// hash must be the hash of key.
func (t *table) PutSlot(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
// As we look for a match, keep track of the first deleted slot we
@ -378,7 +378,7 @@ func (t *table) PutSlot(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.
// requires the caller to ensure that the referenced memory never
// changes (by sourcing those pointers from another indirect key/elem
// map).
func (t *table) uncheckedPutSlot(typ *abi.SwissMapType, hash uintptr, key, elem unsafe.Pointer) {
func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
if t.growthLeft == 0 {
panic("invariant failed: growthLeft is unexpectedly 0")
}
@ -418,7 +418,7 @@ func (t *table) uncheckedPutSlot(typ *abi.SwissMapType, hash uintptr, key, elem
}
// Delete returns true if it put a tombstone in t.
func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
@ -505,14 +505,14 @@ func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.P
// We really need to remove O(n) tombstones so we can pay for the cost
// of finding them. If we can't, then we need to grow (which is also O(n),
// but guarantees O(n) subsequent inserts can happen in constant time).
func (t *table) pruneTombstones(typ *abi.SwissMapType, m *Map) {
func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
if t.tombstones()*10 < t.capacity { // 10% of capacity
// Not enough tombstones to be worth the effort.
return
}
// Bit set marking all the groups whose tombstones are needed.
var needed [(maxTableCapacity/abi.SwissMapGroupSlots + 31) / 32]uint32
var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32
// Trace the probe sequence of every full entry.
for i := uint64(0); i <= t.groups.lengthMask; i++ {
@ -591,11 +591,11 @@ func (t *table) pruneTombstones(typ *abi.SwissMapType, m *Map) {
// tombstone is a slot that has been deleted but is still considered occupied
// so as not to violate the probing invariant.
func (t *table) tombstones() uint16 {
return (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - t.growthLeft
return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
}
// Clear deletes all entries from the map resulting in an empty map.
func (t *table) Clear(typ *abi.SwissMapType) {
func (t *table) Clear(typ *abi.MapType) {
mgl := t.maxGrowthLeft()
if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
return
@ -614,7 +614,7 @@ func (t *table) Clear(typ *abi.SwissMapType) {
type Iter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
typ *abi.SwissMapType
typ *abi.MapType
m *Map
// Randomize iteration order by starting iteration at a random slot
@ -648,7 +648,7 @@ type Iter struct {
}
// Init initializes Iter for iteration.
func (it *Iter) Init(typ *abi.SwissMapType, m *Map) {
func (it *Iter) Init(typ *abi.MapType, m *Map) {
it.typ = typ
if m == nil || m.used == 0 {
@ -795,8 +795,8 @@ func (it *Iter) Next() {
if it.dirIdx < 0 {
// Map was small at Init.
for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ {
k := uintptr(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots
for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots
if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted.
@ -933,13 +933,13 @@ func (it *Iter) Next() {
// match.
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
@ -1000,14 +1000,14 @@ func (it *Iter) Next() {
var groupMatch bitset
for it.entryIdx <= entryMask {
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
@ -1025,7 +1025,7 @@ func (it *Iter) Next() {
if groupMatch == 0 {
// Jump past remaining slots in this
// group.
it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
@ -1067,7 +1067,7 @@ func (it *Iter) Next() {
// No more entries in this
// group. Continue to next
// group.
it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
@ -1092,7 +1092,7 @@ func (it *Iter) Next() {
// No more entries in
// this group. Continue
// to next group.
it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
} else {
// Next full slot.
i := groupMatch.first()
@ -1115,7 +1115,7 @@ func (it *Iter) Next() {
// Replaces the table with one larger table or two split tables to fit more
// entries. Since the table is replaced, t is now stale and should not be
// modified.
func (t *table) rehash(typ *abi.SwissMapType, m *Map) {
func (t *table) rehash(typ *abi.MapType, m *Map) {
// TODO(prattmic): SwissTables typically perform a "rehash in place"
// operation which recovers capacity consumed by tombstones without growing
// the table by reordering slots as necessary to maintain the probe
@ -1149,7 +1149,7 @@ func localDepthMask(localDepth uint8) uintptr {
}
// split the table into two, installing the new tables in the map directory.
func (t *table) split(typ *abi.SwissMapType, m *Map) {
func (t *table) split(typ *abi.MapType, m *Map) {
localDepth := t.localDepth
localDepth++
@ -1162,7 +1162,7 @@ func (t *table) split(typ *abi.SwissMapType, m *Map) {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
@ -1197,13 +1197,13 @@ func (t *table) split(typ *abi.SwissMapType, m *Map) {
// and uncheckedPutting each element of the table into the new table (we know
// that no insertion here will Put an already-present value), and discard the
// old table.
func (t *table) grow(typ *abi.SwissMapType, m *Map, newCapacity uint16) {
func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)
if t.capacity > 0 {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
@ -1262,7 +1262,7 @@ func (s probeSeq) next() probeSeq {
return s
}
func (t *table) clone(typ *abi.SwissMapType) *table {
func (t *table) clone(typ *abi.MapType) *table {
// Shallow copy the table structure.
t2 := new(table)
*t2 = *t
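
The probeSeq used throughout this file is triangular-number (quadratic) probing. A sketch consistent with the calls above: because the group count is a power of two, the sequence visits every group exactly once before repeating, which is why a probe is guaranteed to reach an empty slot eventually (the probe invariant checked in table_debug.go below).

    // Offsets h1, h1+1, h1+3, h1+6, ... modulo the group count.
    type probeSeq struct {
        mask   uint64 // number of groups - 1 (groups.lengthMask)
        offset uint64 // current group index
        index  uint64 // step counter
    }

    func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
        return probeSeq{mask: mask, offset: uint64(hash) & mask}
    }

    func (s probeSeq) next() probeSeq {
        s.index++
        s.offset = (s.offset + s.index) & s.mask
        return s
    }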

View file

@ -12,7 +12,7 @@ import (
const debugLog = false
func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
func (t *table) checkInvariants(typ *abi.MapType, m *Map) {
if !debugLog {
return
}
@ -24,7 +24,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
var empty uint16
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
for j := uintptr(0); j < abi.MapGroupSlots; j++ {
c := g.ctrls().get(j)
switch {
case c == ctrlDeleted:
@ -63,7 +63,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
panic("invariant failed: found mismatched used slot count")
}
growthLeft := (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - deleted
growthLeft := (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - deleted
if growthLeft != t.growthLeft {
print("invariant failed: found ", t.growthLeft, " growthLeft, but expected ", growthLeft, "\n")
t.Print(typ, m)
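The recomputation above encodes the map's maximum load factor. A worked example, assuming maxAvgGroupLoad is 7 (the usual 7/8 load bound) and 8 slots per group:

package main

import "fmt"

func main() {
	const (
		maxAvgGroupLoad = 7 // assumption: 7 of every 8 slots may be used
		groupSlots      = 8 // assumed stand-in for abi.MapGroupSlots
	)
	capacity, used, deleted := uint16(64), uint16(20), uint16(3)
	growthLeft := (capacity*maxAvgGroupLoad)/groupSlots - used - deleted
	fmt.Println(growthLeft) // 64*7/8 - 20 - 3 = 33
}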
@ -81,7 +81,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
panic("invariant failed: found no empty slots (violates probe invariant)")
}
}
func (t *table) Print(typ *abi.SwissMapType, m *Map) {
func (t *table) Print(typ *abi.MapType, m *Map) {
print(`table{
index: `, t.index, `
localDepth: `, t.localDepth, `
@ -96,7 +96,7 @@ func (t *table) Print(typ *abi.SwissMapType, m *Map) {
g := t.groups.group(typ, i)
ctrls := g.ctrls()
for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
for j := uintptr(0); j < abi.MapGroupSlots; j++ {
print("\t\t\tslot ", j, "\n")
c := ctrls.get(j)


@ -16,7 +16,7 @@ func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type " + t.String())
}
tt := (*abi.SwissMapType)(unsafe.Pointer(t))
tt := (*abi.MapType)(unsafe.Pointer(t))
return toType(tt.Key)
}
@ -43,7 +43,7 @@ func MapOf(key, elem Type) Type {
// Look in known types.
s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
mt := (*abi.SwissMapType)(unsafe.Pointer(tt))
mt := (*abi.MapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
@ -56,7 +56,7 @@ func MapOf(key, elem Type) Type {
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**abi.SwissMapType)(unsafe.Pointer(&imap))
mt := **(**abi.MapType)(unsafe.Pointer(&imap))
mt.Str = resolveReflectName(newName(s, "", false, false))
mt.TFlag = abi.TFlagDirectIface
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
@ -71,16 +71,16 @@ func MapOf(key, elem Type) Type {
mt.ElemOff = slot.Field(1).Offset
mt.Flags = 0
if needKeyUpdate(ktyp) {
mt.Flags |= abi.SwissMapNeedKeyUpdate
mt.Flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(ktyp) {
mt.Flags |= abi.SwissMapHashMightPanic
mt.Flags |= abi.MapHashMightPanic
}
if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
mt.Flags |= abi.SwissMapIndirectKey
if ktyp.Size_ > abi.MapMaxKeyBytes {
mt.Flags |= abi.MapIndirectKey
}
if etyp.Size_ > abi.SwissMapMaxElemBytes {
mt.Flags |= abi.SwissMapIndirectElem
if etyp.Size_ > abi.MapMaxElemBytes {
mt.Flags |= abi.MapIndirectElem
}
mt.PtrToThis = 0
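Everything this hunk configures is reachable from the exported API. A small usage example of reflect.MapOf together with MakeMap:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build map[string]int at runtime and use it through reflection.
	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	m := reflect.MakeMap(mt)
	m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
	fmt.Println(mt, m.MapIndex(reflect.ValueOf("answer"))) // map[string]int 42
}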
@ -91,16 +91,16 @@ func MapOf(key, elem Type) Type {
func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
// type group struct {
// ctrl uint64
// slots [abi.SwissMapGroupSlots]struct {
// slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
// }
if ktyp.Size() > abi.SwissMapMaxKeyBytes {
if ktyp.Size() > abi.MapMaxKeyBytes {
ktyp = PointerTo(ktyp)
}
if etyp.Size() > abi.SwissMapMaxElemBytes {
if etyp.Size() > abi.MapMaxElemBytes {
etyp = PointerTo(etyp)
}
@ -123,7 +123,7 @@ func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
},
{
Name: "Slots",
Type: ArrayOf(abi.SwissMapGroupSlots, slot),
Type: ArrayOf(abi.MapGroupSlots, slot),
},
}
group := StructOf(fields)
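For a concrete key and element type, the StructOf/ArrayOf construction above is equivalent to an ordinary declaration. A sketch for map[string]int, with the 8-slot group size as an assumption:

package main

// Static equivalent of the group type groupAndSlotOf assembles for
// map[string]int; 8 slots per group matches the comment above.
type slotStringInt struct {
	key  string
	elem int
}

type groupStringInt struct {
	ctrl  uint64
	slots [8]slotStringInt
}

func main() {}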
@ -138,7 +138,7 @@ var stringType = rtypeOf("")
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
@ -149,7 +149,7 @@ func (v Value) MapIndex(key Value) Value {
// of unexported fields.
var e unsafe.Pointer
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
@ -174,7 +174,7 @@ func (v Value) MapIndex(key Value) Value {
// Equivalent to runtime.mapIterStart.
//
//go:noinline
func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
race.ReadPC(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
@ -202,7 +202,7 @@ func mapIterNext(it *maps.Iter) {
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
fl := v.flag.ro() | flag(keyType.Kind())
@ -251,7 +251,7 @@ func (iter *MapIter) Key() Value {
panic("MapIter.Key called on exhausted iterator")
}
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
@ -276,7 +276,7 @@ func (v Value) SetIterKey(iter *MapIter) {
target = v.ptr
}
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
@ -295,7 +295,7 @@ func (iter *MapIter) Value() Value {
panic("MapIter.Value called on exhausted iterator")
}
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
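These key/value accessors are what Value.MapRange hands back on each step. Typical caller-side usage:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(map[string]int{"a": 1, "b": 2})
	it := v.MapRange()
	for it.Next() { // drives mapIterStart / mapIterNext under the hood
		fmt.Println(it.Key(), it.Value())
	}
}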
@ -320,7 +320,7 @@ func (v Value) SetIterValue(iter *MapIter) {
target = v.ptr
}
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
iter.m.mustBeExported() // do not let unexported m leak
@ -337,7 +337,7 @@ func (iter *MapIter) Next() bool {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.Initialized() {
t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
m := (*maps.Map)(iter.m.pointer())
mapIterStart(t, m, &iter.hiter)
} else {
@ -397,9 +397,9 @@ func (v Value) SetMapIndex(key, elem Value) {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)


@ -61,7 +61,7 @@ type linknameIter struct {
// Fields from hiter.
key unsafe.Pointer
elem unsafe.Pointer
typ *abi.SwissMapType
typ *abi.MapType
// The real iterator.
it *maps.Iter
@ -85,7 +85,7 @@ type linknameIter struct {
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
func mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
@ -117,7 +117,7 @@ func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
func reflect_mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
mapiterinit(t, m, it)
}


@ -20,7 +20,7 @@ const (
//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
var maps_errNilAssign error = plainError("assignment to entry in nil map")
func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
func makemap64(t *abi.MapType, hint int64, m *maps.Map) *maps.Map {
if int64(int(hint)) != hint {
hint = 0
}
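The check above matters on 32-bit platforms, where an int64 hint may not fit in int; oversized hints are dropped to 0 rather than rejected, and the map simply grows on demand. A sketch with int32 standing in for a 32-bit platform's int:

package main

import "fmt"

func main() {
	hint := int64(1) << 40 // too large for a 32-bit int
	// Same shape as makemap64's check, with int32 simulating a
	// 32-bit platform's int.
	if int64(int32(hint)) != hint {
		hint = 0
	}
	fmt.Println(hint) // 0: an oversized hint is ignored, not an error
}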
@ -28,7 +28,7 @@ func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
}
// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots
// make(map[k]v, hint) when hint is known to be at most abi.MapGroupSlots
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
@ -59,7 +59,7 @@ func makemap_small() *maps.Map {
// See go.dev/issue/67401.
//
//go:linkname makemap
func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
func makemap(t *abi.MapType, hint int, m *maps.Map) *maps.Map {
if hint < 0 {
hint = 0
}
@ -77,7 +77,7 @@ func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
// we want to avoid one layer of call.
//
//go:linkname mapaccess1
func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
@ -88,9 +88,9 @@ func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero
@ -98,7 +98,7 @@ func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer)
return e
}
func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero, false
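Both _fat wrappers rely on the same sentinel trick: mapaccess1 returns a pointer into a shared zeroVal array when the key is missing, and the wrapper substitutes the caller-provided zero (used when the element's zero value is too big for zeroVal). A self-contained sketch of the compare-and-substitute pattern:

package main

import (
	"fmt"
	"unsafe"
)

var zeroVal [16]byte // stand-in for the runtime's shared zero buffer

// lookup returns &zeroVal[0] when the key is missing, like mapaccess1.
func lookup(found bool, val *int) unsafe.Pointer {
	if !found {
		return unsafe.Pointer(val)
	}
	return unsafe.Pointer(val)
}

func main() {
	big, zero := 99, 0
	e := unsafe.Pointer(&zeroVal[0]) // simulate a missed lookup
	_ = lookup(true, &big)
	if e == unsafe.Pointer(&zeroVal[0]) { // same compare as mapaccess2_fat
		e = unsafe.Pointer(&zero) // substitute the caller's zero value
	}
	fmt.Println(*(*int)(e)) // 0
}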
@ -121,7 +121,7 @@ func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer)
// See go.dev/issue/67401.
//
//go:linkname mapassign
func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
@ -132,7 +132,7 @@ func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapdelete
func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
func mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
@ -153,7 +153,7 @@ func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
// performs the first step of iteration. The Iter struct pointed to by 'it' is
// allocated on the stack by the compiler's order pass or on the heap by
// reflect. Both need to have zeroed it since the struct contains pointers.
func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
@ -175,7 +175,7 @@ func mapIterNext(it *maps.Iter) {
}
// mapclear deletes all keys from a map.
func mapclear(t *abi.SwissMapType, m *maps.Map) {
func mapclear(t *abi.MapType, m *maps.Map) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
@ -201,7 +201,7 @@ func mapclear(t *abi.SwissMapType, m *maps.Map) {
// See go.dev/issue/67401.
//
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
func reflect_makemap(t *abi.MapType, cap int) *maps.Map {
// Check invariants and reflect's math.
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
@ -222,7 +222,7 @@ func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
// See go.dev/issue/67401.
//
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
func reflect_mapaccess(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
elem, ok := mapaccess2(t, m, key)
if !ok {
// reflect wants nil for a missing element
@ -232,7 +232,7 @@ func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) uns
}
//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
func reflect_mapaccess_faststr(t *abi.MapType, m *maps.Map, key string) unsafe.Pointer {
elem, ok := mapaccess2_faststr(t, m, key)
if !ok {
// reflect wants nil for a missing element
@ -250,24 +250,24 @@ func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) uns
// Do not remove or change the type signature.
//
//go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
func reflect_mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
func reflect_mapassign_faststr(t *abi.MapType, m *maps.Map, key string, elem unsafe.Pointer) {
p := mapassign_faststr(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
func reflect_mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
mapdelete(t, m, key)
}
//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
func reflect_mapdelete_faststr(t *abi.MapType, m *maps.Map, key string) {
mapdelete_faststr(t, m, key)
}
@ -293,7 +293,7 @@ func reflect_maplen(m *maps.Map) int {
}
//go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
func reflect_mapclear(t *abi.MapType, m *maps.Map) {
mapclear(t, m)
}
@ -321,7 +321,7 @@ func mapinitnoop()
//go:linkname mapclone maps.clone
func mapclone(m any) any {
e := efaceOf(&m)
typ := (*abi.SwissMapType)(unsafe.Pointer(e._type))
typ := (*abi.MapType)(unsafe.Pointer(e._type))
map_ := (*maps.Map)(e.data)
map_ = map_.Clone(typ)
e.data = (unsafe.Pointer)(map_)


@ -13,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast32
func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
func mapaccess1_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
@ -24,7 +24,7 @@ func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
func mapaccess2_fast32(t *abi.MapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
@ -36,7 +36,7 @@ func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
func mapassign_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
@ -47,7 +47,7 @@ func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Point
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapassign_fast32ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast32
func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32)
func mapdelete_fast32(t *abi.MapType, m *maps.Map, key uint32)


@ -13,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast64
func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
func mapaccess1_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
@ -24,7 +24,7 @@ func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
func mapaccess2_fast64(t *abi.MapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
@ -36,7 +36,7 @@ func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
func mapassign_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
@ -48,7 +48,7 @@ func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Point
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapassign_fast64ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast64
func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64)
func mapdelete_fast64(t *abi.MapType, m *maps.Map, key uint64)


@ -13,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_faststr
func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer
func mapaccess1_faststr(t *abi.MapType, m *maps.Map, ky string) unsafe.Pointer
// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
@ -24,7 +24,7 @@ func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
func mapaccess2_faststr(t *abi.MapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
@ -36,7 +36,7 @@ func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer
func mapassign_faststr(t *abi.MapType, m *maps.Map, s string) unsafe.Pointer
//go:linkname mapdelete_faststr
func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)
func mapdelete_faststr(t *abi.MapType, m *maps.Map, ky string)


@ -1157,7 +1157,7 @@ func TestHmapSize(t *testing.T) {
func TestGroupSizeZero(t *testing.T) {
var m map[struct{}]struct{}
mTyp := abi.TypeOf(m)
mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
mt := (*abi.MapType)(unsafe.Pointer(mTyp))
// internal/runtime/maps creates pointers to slots, even if slots
// are size 0. The compiler should have reserved an extra word to


@ -160,7 +160,7 @@ class MapTypePrinter:
return str(self.val.type)
def children(self):
SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots
MapGroupSlots = 8 # see internal/abi:MapGroupSlots
cnt = 0
# Yield keys and elements in group.
@ -168,7 +168,7 @@ class MapTypePrinter:
def group_slots(group):
ctrl = group['ctrl']
for i in xrange(SwissMapGroupSlots):
for i in xrange(MapGroupSlots):
c = (ctrl >> (8*i)) & 0xff
if (c & 0x80) != 0:
# Empty or deleted
@ -179,7 +179,7 @@ class MapTypePrinter:
yield str(cnt+1), group['slots'][i]['elem']
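The printer decodes the ctrl word exactly as the runtime lays it out: one control byte per slot, high bit set for empty or deleted. The same extraction in Go (the h2 name for the low 7 bits is an assumption):

package main

import "fmt"

func main() {
	const groupSlots = 8 // assumed stand-in for abi.MapGroupSlots
	// Example: slot 1 is full with control byte 0x3c; every other slot
	// has the high bit set (empty or deleted).
	ctrl := uint64(0x8080_8080_8080_3c80)
	for i := 0; i < groupSlots; i++ {
		c := byte(ctrl >> (8 * uint(i)))
		if c&0x80 != 0 {
			fmt.Printf("slot %d: empty or deleted\n", i)
			continue
		}
		fmt.Printf("slot %d: full, h2=%#x\n", i, c&0x7f)
	}
}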
# The linker DWARF generation
# (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
# (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# dirPtr as a **table[K,V], but it may actually be two different types:
#
# For "full size" maps (dirLen > 0), dirPtr is actually a pointer to
@ -242,7 +242,7 @@ class MapTypePrinter:
length = table['groups']['lengthMask'] + 1
# The linker DWARF generation
# (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
# (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# groups.data as a *group[K,V], but it is actually a pointer to
# variable length array *[length]group[K,V].
#


@ -604,8 +604,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
}
return true
case abi.Map:
mt := (*abi.SwissMapType)(unsafe.Pointer(t))
mv := (*abi.SwissMapType)(unsafe.Pointer(v))
mt := (*abi.MapType)(unsafe.Pointer(t))
mv := (*abi.MapType)(unsafe.Pointer(v))
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
case abi.Pointer:
pt := (*ptrtype)(unsafe.Pointer(t))