internal/maps,cmd/compile/internal/walk: replace calls to mapaccess1* with mapaccess2*
The mapaccess1* and mapaccess2* functions share the same implementation and differ only in whether the boolean "found" result is returned. This change replaces mapaccess1* calls with mapaccess2* calls. The replacement is transparent, since a call site can safely discard the second (boolean) result. Ideally, the mapaccess1* functions could be removed entirely, but this change keeps them as thin wrappers for compatibility.

Fixes #73196

Change-Id: I07c3423d22ed1095ac3666d00e134c2747b2f9c1
Reviewed-on: https://go-review.googlesource.com/c/go/+/736020
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
parent 35abaf75c3
commit 62d08234b7
8 changed files with 59 additions and 290 deletions
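The whole change reduces to a single pattern: each mapaccess1* entry point becomes a thin wrapper that calls its mapaccess2* counterpart and discards the boolean. A minimal, self-contained Go sketch of that pattern (lookup1 and lookup2 are hypothetical stand-ins for the runtime entry points, not real identifiers):

	package main

	import "fmt"

	// lookup2 plays the role of mapaccess2*: the one shared
	// implementation, reporting the element and whether it was found.
	func lookup2(m map[string]int, key string) (int, bool) {
		v, ok := m[key]
		return v, ok
	}

	// lookup1 plays the role of mapaccess1* after this change: a thin
	// wrapper that discards the "found" boolean.
	func lookup1(m map[string]int, key string) int {
		v, _ := lookup2(m, key)
		return v
	}

	func main() {
		m := map[string]int{"a": 1}
		fmt.Println(lookup1(m, "a")) // 1
		fmt.Println(lookup1(m, "b")) // 0: the zero value; "found" is discarded
	}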
@@ -154,12 +154,14 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
 	r := n.Rhs[0].(*ir.IndexExpr)
 	walkExprListSafe(n.Lhs, init)
 
 	r.X = walkExpr(r.X, init)
 	r.Index = walkExpr(r.Index, init)
+	map_ := r.X
 	t := r.X.Type()
 
 	fast := mapfast(t)
 	key := mapKeyArg(fast, r, r.Index, false)
+	args := []ir.Node{reflectdata.IndexMapRType(base.Pos, r), map_, key}
 
 	// from:
 	//   a,b = m[i]
@@ -168,15 +170,14 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
 	//   a = *var
 	a := n.Lhs[0]
 
-	var call *ir.CallExpr
-	if w := t.Elem().Size(); w <= abi.ZeroValSize {
-		fn := mapfn(mapaccess2[fast], t, false)
-		call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
+	var mapFn ir.Node
+	if t.Elem().Size() > abi.ZeroValSize {
+		args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
+		mapFn = mapfn("mapaccess2_fat", t, true)
 	} else {
-		fn := mapfn("mapaccess2_fat", t, true)
-		z := reflectdata.ZeroAddr(w)
-		call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
+		mapFn = mapfn(mapaccess[fast], t, false)
 	}
+	call := mkcall1(mapFn, mapFn.Type().ResultsTuple(), init, args...)
 
 	// mapaccess2* returns a typed bool, but due to spec changes,
 	// the boolean result of i.(T) is now untyped so we make it the
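At the Go source level, the rewrite above can be pictured roughly as follows (an illustrative sketch of the lowering, not actual compiler output; zeroBig stands for the compiler-emitted zero value passed via reflectdata.ZeroAddr):

	// a, b = m[i], small element type:
	//     tmp, b = runtime.mapaccess2(typ, m, &i)   // tmp is *elem
	//     a = *tmp
	//
	// a, b = m[i], element larger than abi.ZeroValSize:
	//     tmp, b = runtime.mapaccess2_fat(typ, m, &i, &zeroBig)
	//     a = *tmp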
@@ -868,20 +868,43 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
 	key := mapKeyArg(fast, n, n.Index, n.Assigned)
 	args := []ir.Node{reflectdata.IndexMapRType(base.Pos, n), map_, key}
 
-	var mapFn ir.Node
-	switch {
-	case n.Assigned:
-		mapFn = mapfn(mapassign[fast], t, false)
-	case t.Elem().Size() > abi.ZeroValSize:
-		args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
-		mapFn = mapfn("mapaccess1_fat", t, true)
-	default:
-		mapFn = mapfn(mapaccess1[fast], t, false)
+	if n.Assigned {
+		mapFn := mapfn(mapassign[fast], t, false)
+		call := mkcall1(mapFn, nil, init, args...)
+		call.SetType(types.NewPtr(t.Elem()))
+		call.MarkNonNil() // mapassign always returns non-nil pointers.
+		star := ir.NewStarExpr(base.Pos, call)
+		star.SetType(t.Elem())
+		star.SetTypecheck(1)
+		return star
 	}
-	call := mkcall1(mapFn, nil, init, args...)
-	call.SetType(types.NewPtr(t.Elem()))
-	call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
-	star := ir.NewStarExpr(base.Pos, call)
+
+	// from:
+	//   m[i]
+	// to:
+	//   var, _ = mapaccess2*(t, m, i)
+	//   *var
+	var mapFn ir.Node
+	if t.Elem().Size() > abi.ZeroValSize {
+		args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
+		mapFn = mapfn("mapaccess2_fat", t, true)
+	} else {
+		mapFn = mapfn(mapaccess[fast], t, false)
+	}
+	call := mkcall1(mapFn, mapFn.Type().ResultsTuple(), init, args...)
+
+	var_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t.Elem()))
+	var_.SetTypecheck(1)
+	var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+	bool_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+	bool_.SetTypecheck(1)
+
+	r := ir.NewAssignListStmt(base.Pos, ir.OAS2FUNC, []ir.Node{var_, bool_}, []ir.Node{call})
+	r.SetTypecheck(1)
+	init.Append(walkExpr(r, init))
+
+	star := ir.NewStarExpr(base.Pos, var_)
 	star.SetType(t.Elem())
 	star.SetTypecheck(1)
 	return star
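The two walkIndexMap paths now split only on assignment context; illustratively (made-up surface syntax, not compiler output):

	// m[i] = x  (n.Assigned): p := runtime.mapassign(typ, m, &i); *p = x
	// v := m[i] (plain read):  p, _ = runtime.mapaccess2*(typ, m, &i); v = *p

For the read path, the boolean is materialized into a temporary (bool_ above) solely so the OAS2FUNC assignment is well formed; nothing consumes it.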
@@ -184,8 +184,7 @@ func mkmapnames(base string, ptr string) mapnames {
 	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
 }
 
-var mapaccess1 = mkmapnames("mapaccess1", "")
-var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapaccess = mkmapnames("mapaccess2", "")
 var mapassign = mkmapnames("mapassign", "ptr")
 var mapdelete = mkmapnames("mapdelete", "")
 
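For reference, the new mapaccess table expands, per the mkmapnames body shown above with base "mapaccess2" and ptr "", to:

	mapnames{
		"mapaccess2",
		"mapaccess2_fast32", "mapaccess2_fast32", // ptr == "", so the ptr variant repeats
		"mapaccess2_fast64", "mapaccess2_fast64",
		"mapaccess2_faststr",
	}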
@@ -38,9 +38,6 @@ func newobject(typ *abi.Type) unsafe.Pointer
 //go:linkname errNilAssign
 var errNilAssign error
 
-// Pull from runtime. It is important that is this the exact same copy as the
-// runtime because runtime.mapaccess1_fat compares the returned pointer with
-// &runtime.zeroVal[0].
 // TODO: move zeroVal to internal/abi?
 //
 //go:linkname zeroVal runtime.zeroVal
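The removed comment is stale after this change: mapaccess1_fat used to detect a miss by comparing the returned pointer against &runtime.zeroVal[0], which is why the two zeroVal copies had to be the identical object. The _fat wrappers now consult the boolean from mapaccess2 instead, as the final hunk below shows:

	// Before: miss detected by pointer identity with runtime.zeroVal.
	e := mapaccess1(t, m, key)
	if e == unsafe.Pointer(&zeroVal[0]) { ... }

	// After: miss reported explicitly; no sentinel comparison.
	e, ok := mapaccess2(t, m, key)
	if !ok { ... }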
@@ -54,84 +51,15 @@ var zeroVal [abi.ZeroValSize]byte
 //
 //go:linkname runtime_mapaccess1 runtime.mapaccess1
 func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
-	if race.Enabled && m != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
-		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-		race.ReadObjectPC(typ.Key, key, callerpc, pc)
-	}
-	if msan.Enabled && m != nil {
-		msan.Read(key, typ.Key.Size_)
-	}
-	if asan.Enabled && m != nil {
-		asan.Read(key, typ.Key.Size_)
-	}
-
-	if m == nil || m.Used() == 0 {
-		if err := mapKeyError(typ, key); err != nil {
-			panic(err) // see issue 23734
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	if m.writing != 0 {
-		fatal("concurrent map read and map write")
-	}
-
-	hash := typ.Hasher(key, m.seed)
-
-	if m.dirLen <= 0 {
-		_, elem, ok := m.getWithKeySmall(typ, hash, key)
-		if !ok {
-			return unsafe.Pointer(&zeroVal[0])
-		}
-		return elem
-	}
-
-	// Select table.
-	idx := m.directoryIndex(hash)
-	t := m.directoryAt(idx)
-
-	// Probe table.
-	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-	h2Hash := h2(hash)
-	for ; ; seq = seq.next() {
-		g := t.groups.group(typ, seq.offset)
-
-		match := g.ctrls().matchH2(h2Hash)
-
-		for match != 0 {
-			i := match.first()
-
-			slotKey := g.key(typ, i)
-			slotKeyOrig := slotKey
-			if typ.IndirectKey() {
-				slotKey = *((*unsafe.Pointer)(slotKey))
-			}
-			if typ.Key.Equal(key, slotKey) {
-				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
-				if typ.IndirectElem() {
-					slotElem = *((*unsafe.Pointer)(slotElem))
-				}
-				return slotElem
-			}
-			match = match.removeFirst()
-		}
-
-		match = g.ctrls().matchEmpty()
-		if match != 0 {
-			// Finding an empty slot means we've reached the end of
-			// the probe sequence.
-			return unsafe.Pointer(&zeroVal[0])
-		}
-	}
+	p, _ := runtime_mapaccess2(typ, m, key)
+	return p
 }
 
 //go:linkname runtime_mapaccess2 runtime.mapaccess2
 func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
 	if race.Enabled && m != nil {
 		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
+		pc := abi.FuncPCABIInternal(runtime_mapaccess2)
 		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
 		race.ReadObjectPC(typ.Key, key, callerpc, pc)
 	}
@@ -13,72 +13,8 @@ import (
 
 //go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
 func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
-	if race.Enabled && m != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
-		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-	}
-
-	if m == nil || m.Used() == 0 {
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	if m.writing != 0 {
-		fatal("concurrent map read and map write")
-		return nil
-	}
-
-	if m.dirLen == 0 {
-		g := groupReference{
-			data: m.dirPtr,
-		}
-		full := g.ctrls().matchFull()
-		slotKey := g.key(typ, 0)
-		slotSize := typ.SlotSize
-		for full != 0 {
-			if key == *(*uint32)(slotKey) && full.lowestSet() {
-				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
-				return slotElem
-			}
-			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
-			full = full.shiftOutLowest()
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	k := key
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)
-
-	// Select table.
-	idx := m.directoryIndex(hash)
-	t := m.directoryAt(idx)
-
-	// Probe table.
-	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-	h2Hash := h2(hash)
-	for ; ; seq = seq.next() {
-		g := t.groups.group(typ, seq.offset)
-
-		match := g.ctrls().matchH2(h2Hash)
-
-		for match != 0 {
-			i := match.first()
-
-			slotKey := g.key(typ, i)
-			if key == *(*uint32)(slotKey) {
-				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
-				return slotElem
-			}
-			match = match.removeFirst()
-		}
-
-		match = g.ctrls().matchEmpty()
-		if match != 0 {
-			// Finding an empty slot means we've reached the end of
-			// the probe sequence.
-			return unsafe.Pointer(&zeroVal[0])
-		}
-	}
+	p, _ := runtime_mapaccess2_fast32(typ, m, key)
+	return p
 }
 
 //go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
@@ -13,72 +13,8 @@ import (
 
 //go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
 func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
-	if race.Enabled && m != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
-		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-	}
-
-	if m == nil || m.Used() == 0 {
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	if m.writing != 0 {
-		fatal("concurrent map read and map write")
-		return nil
-	}
-
-	if m.dirLen == 0 {
-		g := groupReference{
-			data: m.dirPtr,
-		}
-		full := g.ctrls().matchFull()
-		slotKey := g.key(typ, 0)
-		slotSize := typ.SlotSize
-		for full != 0 {
-			if key == *(*uint64)(slotKey) && full.lowestSet() {
-				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
-				return slotElem
-			}
-			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
-			full = full.shiftOutLowest()
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	k := key
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)
-
-	// Select table.
-	idx := m.directoryIndex(hash)
-	t := m.directoryAt(idx)
-
-	// Probe table.
-	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-	h2Hash := h2(hash)
-	for ; ; seq = seq.next() {
-		g := t.groups.group(typ, seq.offset)
-
-		match := g.ctrls().matchH2(h2Hash)
-
-		for match != 0 {
-			i := match.first()
-
-			slotKey := g.key(typ, i)
-			if key == *(*uint64)(slotKey) {
-				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
-				return slotElem
-			}
-			match = match.removeFirst()
-		}
-
-		match = g.ctrls().matchEmpty()
-		if match != 0 {
-			// Finding an empty slot means we've reached the end of
-			// the probe sequence.
-			return unsafe.Pointer(&zeroVal[0])
-		}
-	}
+	p, _ := runtime_mapaccess2_fast64(typ, m, key)
+	return p
 }
 
 //go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
@@ -99,62 +99,8 @@ func stringPtr(s string) unsafe.Pointer {
 
 //go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
 func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
-	if race.Enabled && m != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
-		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-	}
-
-	if m == nil || m.Used() == 0 {
-		return unsafe.Pointer(&zeroVal[0])
-	}
-
-	if m.writing != 0 {
-		fatal("concurrent map read and map write")
-		return nil
-	}
-
-	if m.dirLen <= 0 {
-		elem := m.getWithoutKeySmallFastStr(typ, key)
-		if elem == nil {
-			return unsafe.Pointer(&zeroVal[0])
-		}
-		return elem
-	}
-
-	k := key
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)
-
-	// Select table.
-	idx := m.directoryIndex(hash)
-	t := m.directoryAt(idx)
-
-	// Probe table.
-	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-	h2Hash := h2(hash)
-	for ; ; seq = seq.next() {
-		g := t.groups.group(typ, seq.offset)
-
-		match := g.ctrls().matchH2(h2Hash)
-
-		for match != 0 {
-			i := match.first()
-
-			slotKey := g.key(typ, i)
-			if key == *(*string)(slotKey) {
-				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
-				return slotElem
-			}
-			match = match.removeFirst()
-		}
-
-		match = g.ctrls().matchEmpty()
-		if match != 0 {
-			// Finding an empty slot means we've reached the end of
-			// the probe sequence.
-			return unsafe.Pointer(&zeroVal[0])
-		}
-	}
+	p, _ := runtime_mapaccess2_faststr(typ, m, key)
+	return p
 }
 
 //go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
@@ -91,16 +91,16 @@ func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
 func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
 
 func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
-	e := mapaccess1(t, m, key)
-	if e == unsafe.Pointer(&zeroVal[0]) {
+	e, ok := mapaccess2(t, m, key)
+	if !ok {
 		return zero
 	}
 	return e
 }
 
 func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
-	e := mapaccess1(t, m, key)
-	if e == unsafe.Pointer(&zeroVal[0]) {
+	e, ok := mapaccess2(t, m, key)
+	if !ok {
 		return zero, false
 	}
 	return e, true
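Putting the pieces together: after this change, a read of a map whose element type exceeds abi.ZeroValSize lowers, roughly, to the following (an illustrative sketch, not actual generated code; zeroBig stands for the compiler-emitted zero value passed via reflectdata.ZeroAddr):

	p, ok := runtime.mapaccess2_fat(typ, m, &key, &zeroBig)
	v := *p // for a plain v := m[k], ok is simply discarded

mapaccess2_fat substitutes the caller-supplied zero value on a miss, so p is always safe to dereference.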