[dev.regabi] cmd/compile: rewrite problematic use of Node fields

For the upcoming rewrite to access methods, a few direct accesses
are problematic for the automated tool, most notably direct copies
or use of Node structs as opposed to pointers.

Fix these manually.

Passes toolstash -cmp.

Change-Id: I8bdbb33216737c09e1edda284d5c414422d86284
Reviewed-on: https://go-review.googlesource.com/c/go/+/273006
Trust: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
This commit is contained in:
Russ Cox 2020-11-22 12:09:08 -05:00
parent 9e0e43d84d
commit 9262909764
6 changed files with 156 additions and 150 deletions

View file

@ -53,8 +53,8 @@ func funcSym(fn *Node) *types.Sym {
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat // Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again. // and set it to one of the following two. Then in esc we'll clear it again.
var ( var (
looping Node looping = nod(OXXX, nil, nil)
nonlooping Node nonlooping = nod(OXXX, nil, nil)
) )
func isSliceSelfAssign(dst, src *Node) bool { func isSliceSelfAssign(dst, src *Node) bool {

View file

@ -227,13 +227,13 @@ func (e *Escape) walkFunc(fn *Node) {
inspectList(fn.Nbody, func(n *Node) bool { inspectList(fn.Nbody, func(n *Node) bool {
switch n.Op { switch n.Op {
case OLABEL: case OLABEL:
n.Sym.Label = asTypesNode(&nonlooping) n.Sym.Label = asTypesNode(nonlooping)
case OGOTO: case OGOTO:
// If we visited the label before the goto, // If we visited the label before the goto,
// then this is a looping label. // then this is a looping label.
if n.Sym.Label == asTypesNode(&nonlooping) { if n.Sym.Label == asTypesNode(nonlooping) {
n.Sym.Label = asTypesNode(&looping) n.Sym.Label = asTypesNode(looping)
} }
} }
@ -309,11 +309,11 @@ func (e *Escape) stmt(n *Node) {
case OLABEL: case OLABEL:
switch asNode(n.Sym.Label) { switch asNode(n.Sym.Label) {
case &nonlooping: case nonlooping:
if Debug.m > 2 { if Debug.m > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n) fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
} }
case &looping: case looping:
if Debug.m > 2 { if Debug.m > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n) fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
} }

View file

@ -839,7 +839,8 @@ func (r *importReader) node() *Node {
if s := r.ident(); s != nil { if s := r.ident(); s != nil {
n.Left = npos(n.Pos, newnoname(s)) n.Left = npos(n.Pos, newnoname(s))
} }
n.Right, _ = r.exprsOrNil() right, _ := r.exprsOrNil()
n.Right = right
return n return n
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
@ -1021,7 +1022,9 @@ func (r *importReader) node() *Node {
case OFOR: case OFOR:
n := nodl(r.pos(), OFOR, nil, nil) n := nodl(r.pos(), OFOR, nil, nil)
n.Ninit.Set(r.stmtList()) n.Ninit.Set(r.stmtList())
n.Left, n.Right = r.exprsOrNil() left, right := r.exprsOrNil()
n.Left = left
n.Right = right
n.Nbody.Set(r.stmtList()) n.Nbody.Set(r.stmtList())
return n return n
@ -1035,7 +1038,8 @@ func (r *importReader) node() *Node {
case OSELECT, OSWITCH: case OSELECT, OSWITCH:
n := nodl(r.pos(), op, nil, nil) n := nodl(r.pos(), op, nil, nil)
n.Ninit.Set(r.stmtList()) n.Ninit.Set(r.stmtList())
n.Left, _ = r.exprsOrNil() left, _ := r.exprsOrNil()
n.Left = left
n.List.Set(r.caseList(n)) n.List.Set(r.caseList(n))
return n return n

View file

@ -186,9 +186,8 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
return true return true
case OADDR: case OADDR:
var nam Node if nam := stataddr(r.Left); nam != nil {
if stataddr(&nam, r.Left) { addrsym(l, nam)
addrsym(l, &nam)
return true return true
} }
fallthrough fallthrough
@ -609,11 +608,11 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// copy static to slice // copy static to slice
var_ = typecheck(var_, ctxExpr|ctxAssign) var_ = typecheck(var_, ctxExpr|ctxAssign)
var nam Node nam := stataddr(var_)
if !stataddr(&nam, var_) || nam.Class() != PEXTERN { if nam == nil || nam.Class() != PEXTERN {
Fatalf("slicelit: %v", var_) Fatalf("slicelit: %v", var_)
} }
slicesym(&nam, vstat, t.NumElem()) slicesym(nam, vstat, t.NumElem())
return return
} }
@ -1001,30 +1000,31 @@ func getlit(lit *Node) int {
return -1 return -1
} }
// stataddr sets nam to the static address of n and reports whether it succeeded. // stataddr returns the static address of n, if n has one, or else nil.
func stataddr(nam *Node, n *Node) bool { func stataddr(n *Node) *Node {
if n == nil { if n == nil {
return false return nil
} }
switch n.Op { switch n.Op {
case ONAME, OMETHEXPR: case ONAME, OMETHEXPR:
*nam = *n return n.sepcopy()
return true
case ODOT: case ODOT:
if !stataddr(nam, n.Left) { nam := stataddr(n.Left)
if nam == nil {
break break
} }
nam.Xoffset += n.Xoffset nam.Xoffset += n.Xoffset
nam.Type = n.Type nam.Type = n.Type
return true return nam
case OINDEX: case OINDEX:
if n.Left.Type.IsSlice() { if n.Left.Type.IsSlice() {
break break
} }
if !stataddr(nam, n.Left) { nam := stataddr(n.Left)
if nam == nil {
break break
} }
l := getlit(n.Right) l := getlit(n.Right)
@ -1038,10 +1038,10 @@ func stataddr(nam *Node, n *Node) bool {
} }
nam.Xoffset += int64(l) * n.Type.Width nam.Xoffset += int64(l) * n.Type.Width
nam.Type = n.Type nam.Type = n.Type
return true return nam
} }
return false return nil
} }
func (s *InitSchedule) initplan(n *Node) { func (s *InitSchedule) initplan(n *Node) {
@ -1158,16 +1158,16 @@ func genAsStatic(as *Node) {
Fatalf("genAsStatic as.Left not typechecked") Fatalf("genAsStatic as.Left not typechecked")
} }
var nam Node nam := stataddr(as.Left)
if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) { if nam == nil || (nam.Class() != PEXTERN && as.Left != nblank) {
Fatalf("genAsStatic: lhs %v", as.Left) Fatalf("genAsStatic: lhs %v", as.Left)
} }
switch { switch {
case as.Right.Op == OLITERAL: case as.Right.Op == OLITERAL:
litsym(&nam, as.Right, int(as.Right.Type.Width)) litsym(nam, as.Right, int(as.Right.Type.Width))
case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC: case (as.Right.Op == ONAME || as.Right.Op == OMETHEXPR) && as.Right.Class() == PFUNC:
pfuncsym(&nam, as.Right) pfuncsym(nam, as.Right)
default: default:
Fatalf("genAsStatic: rhs %v", as.Right) Fatalf("genAsStatic: rhs %v", as.Right)
} }

View file

@ -388,7 +388,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR]) s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
s.startBlock(s.f.Entry) s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem s.vars[memVar] = s.startmem
if s.hasOpenDefers { if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a // Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function // bitmask showing which of the open-coded defers in this function
@ -397,7 +397,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.deferBitsTemp = deferBitsTemp s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default // For this value, AuxInt is initialized to zero by default
startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8]) startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
s.vars[&deferBitsVar] = startDeferBits s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp) s.deferBitsAddr = s.addr(deferBitsTemp)
s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits) s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use // Make sure that the deferBits stack slot is kept alive (for use
@ -405,7 +405,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// all checking code on deferBits in the function exit can be // all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all // eliminated, because the defer statements were all
// unconditional. // unconditional.
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
} }
// Generate addresses of local declarations // Generate addresses of local declarations
@ -691,18 +691,22 @@ func (s *state) Fatalf(msg string, args ...interface{}) {
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) } func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *Node {
return newname(&types.Sym{Name: name})
}
var ( var (
// marker node for the memory variable // marker node for the memory variable
memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}} memVar = ssaMarker("mem")
// marker nodes for temporary variables // marker nodes for temporary variables
ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} ptrVar = ssaMarker("ptr")
lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} lenVar = ssaMarker("len")
newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} newlenVar = ssaMarker("newlen")
capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}} capVar = ssaMarker("cap")
typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}} typVar = ssaMarker("typ")
okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}} okVar = ssaMarker("ok")
deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}} deferBitsVar = ssaMarker("deferBits")
) )
// startBlock sets the current block we're generating code in to b. // startBlock sets the current block we're generating code in to b.
@ -1027,14 +1031,14 @@ func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
} }
func (s *state) store(t *types.Type, dst, val *ssa.Value) { func (s *state) store(t *types.Type, dst, val *ssa.Value) {
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem()) s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
} }
func (s *state) zero(t *types.Type, dst *ssa.Value) { func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, true) s.instrument(t, dst, true)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem()) store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t store.Aux = t
s.vars[&memVar] = store s.vars[memVar] = store
} }
func (s *state) move(t *types.Type, dst, src *ssa.Value) { func (s *state) move(t *types.Type, dst, src *ssa.Value) {
@ -1042,7 +1046,7 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrument(t, dst, true) s.instrument(t, dst, true)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem()) store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t store.Aux = t
s.vars[&memVar] = store s.vars[memVar] = store
} }
// stmtList converts the statement list n to SSA and adds it to s. // stmtList converts the statement list n to SSA and adds it to s.
@ -1509,7 +1513,7 @@ func (s *state) stmt(n *Node) {
case OVARDEF: case OVARDEF:
if !s.canSSA(n.Left) { if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
} }
case OVARKILL: case OVARKILL:
// Insert a varkill op to record that a variable is no longer live. // Insert a varkill op to record that a variable is no longer live.
@ -1517,7 +1521,7 @@ func (s *state) stmt(n *Node) {
// varkill in the store chain is enough to keep it correctly ordered // varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops. // with respect to call ops.
if !s.canSSA(n.Left) { if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
} }
case OVARLIVE: case OVARLIVE:
@ -1530,7 +1534,7 @@ func (s *state) stmt(n *Node) {
default: default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
} }
s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
case OCHECKNIL: case OCHECKNIL:
p := s.expr(n.Left) p := s.expr(n.Left)
@ -1576,7 +1580,7 @@ func (s *state) exit() *ssa.Block {
for _, n := range s.returns { for _, n := range s.returns {
addr := s.decladdrs[n] addr := s.decladdrs[n]
val := s.variable(n, n.Type) val := s.variable(n, n.Type)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.store(n.Type, addr, val) s.store(n.Type, addr, val)
// TODO: if val is ever spilled, we'd like to use the // TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen // PPARAMOUT slot for spilling it. That won't happen
@ -2843,14 +2847,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl) cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
s.vars[&ptrVar] = p s.vars[ptrVar] = p
if !inplace { if !inplace {
s.vars[&newlenVar] = nl s.vars[newlenVar] = nl
s.vars[&capVar] = c s.vars[capVar] = c
} else { } else {
s.vars[&lenVar] = l s.vars[lenVar] = l
} }
b := s.endBlock() b := s.endBlock()
@ -2868,18 +2872,18 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if inplace { if inplace {
if sn.Op == ONAME && sn.Class() != PEXTERN { if sn.Op == ONAME && sn.Class() != PEXTERN {
// Tell liveness we're about to build a new slice // Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
} }
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr) capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
s.store(types.Types[TINT], capaddr, r[2]) s.store(types.Types[TINT], capaddr, r[2])
s.store(pt, addr, r[0]) s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it // load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.load(pt, addr) s.vars[ptrVar] = s.load(pt, addr)
s.vars[&lenVar] = r[1] // avoid a spill in the fast path s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else { } else {
s.vars[&ptrVar] = r[0] s.vars[ptrVar] = r[0]
s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) s.vars[newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
s.vars[&capVar] = r[2] s.vars[capVar] = r[2]
} }
b = s.endBlock() b = s.endBlock()
@ -2889,7 +2893,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
s.startBlock(assign) s.startBlock(assign)
if inplace { if inplace {
l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len l = s.variable(lenVar, types.Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr) lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
s.store(types.Types[TINT], lenaddr, nl) s.store(types.Types[TINT], lenaddr, nl)
@ -2912,10 +2916,10 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
} }
} }
p = s.variable(&ptrVar, pt) // generates phi for ptr p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace { if !inplace {
nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl nl = s.variable(newlenVar, types.Types[TINT]) // generates phi for nl
c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap c = s.variable(capVar, types.Types[TINT]) // generates phi for cap
} }
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args { for i, arg := range args {
@ -2927,13 +2931,13 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
} }
} }
delete(s.vars, &ptrVar) delete(s.vars, ptrVar)
if inplace { if inplace {
delete(s.vars, &lenVar) delete(s.vars, lenVar)
return nil return nil
} }
delete(s.vars, &newlenVar) delete(s.vars, newlenVar)
delete(s.vars, &capVar) delete(s.vars, capVar)
// make result // make result
return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
} }
@ -3074,7 +3078,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// If this assignment clobbers an entire local variable, then emit // If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined. // OpVarDef so liveness analysis knows the variable is redefined.
if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 { if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp()) s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
} }
// Left is not ssa-able. Compute its address. // Left is not ssa-able. Compute its address.
@ -3332,7 +3336,7 @@ func init() {
add("runtime", "KeepAlive", add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil return nil
}, },
all...) all...)
@ -3380,79 +3384,79 @@ func init() {
addF("runtime/internal/atomic", "Load", addF("runtime/internal/atomic", "Load",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8", addF("runtime/internal/atomic", "Load8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64", addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
}, },
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq", addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
}, },
sys.PPC64, sys.S390X) sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64", addF("runtime/internal/atomic", "LoadAcq64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
}, },
sys.PPC64) sys.PPC64)
addF("runtime/internal/atomic", "Loadp", addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store", addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8", addF("runtime/internal/atomic", "Store8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64", addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB", addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel", addF("runtime/internal/atomic", "StoreRel",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.PPC64, sys.S390X) sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64", addF("runtime/internal/atomic", "StoreRel64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.PPC64) sys.PPC64)
@ -3460,14 +3464,14 @@ func init() {
addF("runtime/internal/atomic", "Xchg", addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
}, },
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64", addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
}, },
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
@ -3512,7 +3516,7 @@ func init() {
atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
} }
addF("runtime/internal/atomic", "Xchg", addF("runtime/internal/atomic", "Xchg",
@ -3525,14 +3529,14 @@ func init() {
addF("runtime/internal/atomic", "Xadd", addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
}, },
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64", addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
}, },
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
@ -3546,29 +3550,29 @@ func init() {
addF("runtime/internal/atomic", "Cas", addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
}, },
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64", addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
}, },
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel", addF("runtime/internal/atomic", "CasRel",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
}, },
sys.PPC64) sys.PPC64)
atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
} }
@ -3581,31 +3585,31 @@ func init() {
addF("runtime/internal/atomic", "And8", addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And", addF("runtime/internal/atomic", "And",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8", addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or", addF("runtime/internal/atomic", "Or",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil return nil
}, },
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) { atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
} }
addF("runtime/internal/atomic", "And8", addF("runtime/internal/atomic", "And8",
@ -4274,8 +4278,8 @@ func (s *state) openDeferRecord(n *Node) {
// Update deferBits only after evaluation and storage to stack of // Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful. // args/receiver/interface is successful.
bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index)) bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue) newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(deferBitsVar, types.Types[TUINT8]), bitvalue)
s.vars[&deferBitsVar] = newDeferBits s.vars[deferBitsVar] = newDeferBits
s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits) s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
} }
@ -4304,15 +4308,15 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
// declared in the entry block, so that it will be live for the // declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the // defer exit code (which will actually access it only if the
// associated defer call has been activated). // associated defer call has been activated).
s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar]) s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar]) s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar]) addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else { } else {
// Special case if we're still in the entry block. We can't use // Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined // the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock(). // until we end the entry block with s.endBlock().
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false) addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
} }
if t.HasPointers() { if t.HasPointers() {
@ -4352,7 +4356,7 @@ func (s *state) openDeferExit() {
bCond := s.f.NewBlock(ssa.BlockPlain) bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain) bEnd := s.f.NewBlock(ssa.BlockPlain)
deferBits := s.variable(&deferBitsVar, types.Types[TUINT8]) deferBits := s.variable(deferBitsVar, types.Types[TUINT8])
// Generate code to check if the bit associated with the current // Generate code to check if the bit associated with the current
// defer is set. // defer is set.
bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i)) bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
@ -4373,7 +4377,7 @@ func (s *state) openDeferExit() {
s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval) s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous // Use this value for following tests, so we keep previous
// bits cleared. // bits cleared.
s.vars[&deferBitsVar] = maskedval s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the // Generate code to call the function call of the defer, using the
// closure/receiver/args that were stored in argtmps at the point // closure/receiver/args that were stored in argtmps at the point
@ -4443,25 +4447,25 @@ func (s *state) openDeferExit() {
} }
call.AuxInt = stksize call.AuxInt = stksize
if testLateExpansion { if testLateExpansion {
s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else { } else {
s.vars[&memVar] = call s.vars[memVar] = call
} }
// Make sure that the stack slots with pointers are kept live // Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will // through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness // use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live. // for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil { if r.closureNode != nil {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
} }
if r.rcvrNode != nil { if r.rcvrNode != nil {
if r.rcvrNode.Type.HasPointers() { if r.rcvrNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
} }
} }
for _, argNode := range r.argNodes { for _, argNode := range r.argNodes {
if argNode.Type.HasPointers() { if argNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
} }
} }
@ -4554,7 +4558,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
t := deferstruct(stksize) t := deferstruct(stksize)
d := tempAt(n.Pos, s.curfn, t) d := tempAt(n.Pos, s.curfn, t)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d) addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer. // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
@ -4729,9 +4733,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
} }
if testLateExpansion { if testLateExpansion {
s.prevCall = call s.prevCall = call
s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else { } else {
s.vars[&memVar] = call s.vars[memVar] = call
} }
// Insert OVARLIVE nodes // Insert OVARLIVE nodes
s.stmtList(n.Nbody) s.stmtList(n.Nbody)
@ -5204,10 +5208,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
callArgs = append(callArgs, s.mem()) callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...) call.AddArgs(callArgs...)
s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else { } else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
s.vars[&memVar] = call s.vars[memVar] = call
} }
if !returns { if !returns {
@ -5257,7 +5261,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) { if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type. // Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt) s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return return
} }
@ -5880,12 +5884,12 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// nonempty -> empty // nonempty -> empty
// Need to load type from itab // Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
s.vars[&typVar] = s.load(byteptr, off) s.vars[typVar] = s.load(byteptr, off)
s.endBlock() s.endBlock()
// itab is nil, might as well use that as the nil result. // itab is nil, might as well use that as the nil result.
s.startBlock(bFail) s.startBlock(bFail)
s.vars[&typVar] = itab s.vars[typVar] = itab
s.endBlock() s.endBlock()
// Merge point. // Merge point.
@ -5894,9 +5898,9 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bFail.AddEdgeTo(bEnd) bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd) s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, n.Type, iface) idata := s.newValue1(ssa.OpIData, n.Type, iface)
res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) res = s.newValue2(ssa.OpIMake, n.Type, s.variable(typVar, byteptr), idata)
resok = cond resok = cond
delete(s.vars, &typVar) delete(s.vars, typVar)
return return
} }
// converting to a nonempty interface needs a runtime call. // converting to a nonempty interface needs a runtime call.
@ -5942,7 +5946,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// unSSAable type, use temporary. // unSSAable type, use temporary.
// TODO: get rid of some of these temporaries. // TODO: get rid of some of these temporaries.
tmp = tempAt(n.Pos, s.curfn, n.Type) tmp = tempAt(n.Pos, s.curfn, n.Type)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr = s.addr(tmp) addr = s.addr(tmp)
} }
@ -5981,7 +5985,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bEnd := s.f.NewBlock(ssa.BlockPlain) bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can // Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time. // reuse the variable) because it might have a different type every time.
valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} valVar := ssaMarker("val")
// type assertion succeeded // type assertion succeeded
s.startBlock(bOk) s.startBlock(bOk)
@ -5996,7 +6000,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
s.move(n.Type, addr, p) s.move(n.Type, addr, p)
} }
s.vars[&okVar] = s.constBool(true) s.vars[okVar] = s.constBool(true)
s.endBlock() s.endBlock()
bOk.AddEdgeTo(bEnd) bOk.AddEdgeTo(bEnd)
@ -6007,7 +6011,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
} else { } else {
s.zero(n.Type, addr) s.zero(n.Type, addr)
} }
s.vars[&okVar] = s.constBool(false) s.vars[okVar] = s.constBool(false)
s.endBlock() s.endBlock()
bFail.AddEdgeTo(bEnd) bFail.AddEdgeTo(bEnd)
@ -6018,10 +6022,10 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
delete(s.vars, valVar) delete(s.vars, valVar)
} else { } else {
res = s.load(n.Type, addr) res = s.load(n.Type, addr)
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
} }
resok = s.variable(&okVar, types.Types[TBOOL]) resok = s.variable(okVar, types.Types[types.TBOOL])
delete(s.vars, &okVar) delete(s.vars, okVar)
return res, resok return res, resok
} }
@ -6049,12 +6053,12 @@ func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
} }
func (s *state) mem() *ssa.Value { func (s *state) mem() *ssa.Value {
return s.variable(&memVar, types.TypeMem) return s.variable(memVar, types.TypeMem)
} }
func (s *state) addNamedValue(n *Node, v *ssa.Value) { func (s *state) addNamedValue(n *Node, v *ssa.Value) {
if n.Class() == Pxxx { if n.Class() == Pxxx {
// Don't track our marker nodes (&memVar etc.). // Don't track our marker nodes (memVar etc.).
return return
} }
if n.IsAutoTmp() { if n.IsAutoTmp() {
@ -7064,17 +7068,9 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t
} }
s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg} s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
n := newnamel(parent.N.(*Node).Pos, s)
n := &Node{
Name: new(Name),
Op: ONAME,
Pos: parent.N.(*Node).Pos,
}
n.Orig = n
s.Def = asTypesNode(n) s.Def = asTypesNode(n)
asNode(s.Def).Name.SetUsed(true) asNode(s.Def).Name.SetUsed(true)
n.Sym = s
n.Type = t n.Type = t
n.SetClass(PAUTO) n.SetClass(PAUTO)
n.Esc = EscNever n.Esc = EscNever

View file

@ -1692,8 +1692,8 @@ func typecheck1(n *Node, top int) (res *Node) {
n.Type = nil n.Type = nil
return n return n
} }
var why string op, why := convertop(n.Left.Op == OLITERAL, t, n.Type)
n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type) n.Op = op
if n.Op == OXXX { if n.Op == OXXX {
if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why) yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
@ -3021,7 +3021,8 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri
var key, length int64 var key, length int64
for i, elt := range elts { for i, elt := range elts {
setlineno(elt) setlineno(elt)
vp := &elts[i] r := elts[i]
var kv *Node
if elt.Op == OKEY { if elt.Op == OKEY {
elt.Left = typecheck(elt.Left, ctxExpr) elt.Left = typecheck(elt.Left, ctxExpr)
key = indexconst(elt.Left) key = indexconst(elt.Left)
@ -3036,13 +3037,18 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri
} }
key = -(1 << 30) // stay negative for a while key = -(1 << 30) // stay negative for a while
} }
vp = &elt.Right kv = elt
r = elt.Right
} }
r := *vp
r = pushtype(r, elemType) r = pushtype(r, elemType)
r = typecheck(r, ctxExpr) r = typecheck(r, ctxExpr)
*vp = assignconv(r, elemType, ctx) r = assignconv(r, elemType, ctx)
if kv != nil {
kv.Right = r
} else {
elts[i] = r
}
if key >= 0 { if key >= 0 {
if indices != nil { if indices != nil {