go/src/cmd/compile/internal/gc/alg.go
Russ Cox 6f27d29be0 [dev.regabi] cmd/compile: remove ir.Nod [generated]
Rewrite all uses of ir.Nod and friends to call the IR constructors directly.
This gives the results a more specific type and will play nicely with
the introduction of more specific types throughout the code in a follow-up CL.
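
For example (illustrative, not taken from the CL itself), a call such as

	n := ir.Nod(ir.OAS, l, r)

is rewritten, via the intermediate ir.NodAt form, to the specific
constructor

	n := ir.NewAssignStmt(base.Pos, l, r)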

Passes buildall w/ toolstash -cmp.

[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir {
	import "cmd/compile/internal/ir"
	import "cmd/compile/internal/types"
	import "cmd/compile/internal/syntax"
	import "cmd/internal/src"

	var p *noder
	var orig syntax.Node
	var op ir.Op
	var l, r ir.Node
	var sym *types.Sym
	p.nod(orig, op, l, r) -> ir.NodAt(p.pos(orig), op, l, r)
	p.nodSym(orig, op, l, sym) -> nodlSym(p.pos(orig), op, l, sym)

	var xpos src.XPos
	var ns ir.Nodes
	npos(xpos, nodSym(op, l, sym)) -> nodlSym(xpos, op, l, sym)
	npos(xpos, liststmt(ns)) -> ir.NewBlockStmt(xpos, ns)
}
ex . ../ir {
	import "cmd/compile/internal/base"
	import "cmd/compile/internal/ir"
	import "cmd/compile/internal/types"

	var op ir.Op
	var l, r ir.Node
	ir.Nod(op, l, r) -> ir.NodAt(base.Pos, op, l, r)

	var sym *types.Sym
	nodSym(op, l, sym) -> nodlSym(base.Pos, op, l, sym)
}
ex . ../ir {
	import "cmd/compile/internal/ir"
	import "cmd/internal/src"

	# rf overlapping match handling is not quite good enough
	# for certain nested rewrites, so handle these two - which often contain other ir.NodAt calls - early.
	var l, r ir.Node
	var xpos src.XPos
	ir.NodAt(xpos, ir.OAS, l, r)              -> ir.NewAssignStmt(xpos, l, r)
	ir.NodAt(xpos, ir.OIF, l, nil)            -> ir.NewIfStmt(xpos, l, nil, nil)
}
ex . ../ir {
	import "cmd/compile/internal/ir"
	import "cmd/compile/internal/types"
	import "cmd/internal/src"

	var l, r ir.Node
	var sym *types.Sym
	var xpos src.XPos

	nodlSym(xpos, ir.ODOT, l, sym)                   -> ir.NewSelectorExpr(xpos, ir.ODOT, l, sym)
	nodlSym(xpos, ir.OXDOT, l, sym)                  -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, sym)
	nodlSym(xpos, ir.ODOTPTR, l, sym)                -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, sym)
	nodlSym(xpos, ir.OGOTO, nil, sym)                -> ir.NewBranchStmt(xpos, ir.OGOTO, sym)
	nodlSym(xpos, ir.ORETJMP, nil, sym)              -> ir.NewBranchStmt(xpos, ir.ORETJMP, sym)
	nodlSym(xpos, ir.OLABEL, nil, sym)               -> ir.NewLabelStmt(xpos, sym)
	nodlSym(xpos, ir.OSTRUCTKEY, l, sym)             -> ir.NewStructKeyExpr(xpos, sym, l)

	ir.NodAt(xpos, ir.OADD, l, r)             -> ir.NewBinaryExpr(xpos, ir.OADD, l, r)
	ir.NodAt(xpos, ir.OAND, l, r)             -> ir.NewBinaryExpr(xpos, ir.OAND, l, r)
	ir.NodAt(xpos, ir.OANDNOT, l, r)          -> ir.NewBinaryExpr(xpos, ir.OANDNOT, l, r)
	ir.NodAt(xpos, ir.ODIV, l, r)             -> ir.NewBinaryExpr(xpos, ir.ODIV, l, r)
	ir.NodAt(xpos, ir.OEQ, l, r)              -> ir.NewBinaryExpr(xpos, ir.OEQ, l, r)
	ir.NodAt(xpos, ir.OGE, l, r)              -> ir.NewBinaryExpr(xpos, ir.OGE, l, r)
	ir.NodAt(xpos, ir.OGT, l, r)              -> ir.NewBinaryExpr(xpos, ir.OGT, l, r)
	ir.NodAt(xpos, ir.OLE, l, r)              -> ir.NewBinaryExpr(xpos, ir.OLE, l, r)
	ir.NodAt(xpos, ir.OLSH, l, r)             -> ir.NewBinaryExpr(xpos, ir.OLSH, l, r)
	ir.NodAt(xpos, ir.OLT, l, r)              -> ir.NewBinaryExpr(xpos, ir.OLT, l, r)
	ir.NodAt(xpos, ir.OMOD, l, r)             -> ir.NewBinaryExpr(xpos, ir.OMOD, l, r)
	ir.NodAt(xpos, ir.OMUL, l, r)             -> ir.NewBinaryExpr(xpos, ir.OMUL, l, r)
	ir.NodAt(xpos, ir.ONE, l, r)              -> ir.NewBinaryExpr(xpos, ir.ONE, l, r)
	ir.NodAt(xpos, ir.OOR, l, r)              -> ir.NewBinaryExpr(xpos, ir.OOR, l, r)
	ir.NodAt(xpos, ir.ORSH, l, r)             -> ir.NewBinaryExpr(xpos, ir.ORSH, l, r)
	ir.NodAt(xpos, ir.OSUB, l, r)             -> ir.NewBinaryExpr(xpos, ir.OSUB, l, r)
	ir.NodAt(xpos, ir.OXOR, l, r)             -> ir.NewBinaryExpr(xpos, ir.OXOR, l, r)
	ir.NodAt(xpos, ir.OCOPY, l, r)            -> ir.NewBinaryExpr(xpos, ir.OCOPY, l, r)
	ir.NodAt(xpos, ir.OCOMPLEX, l, r)         -> ir.NewBinaryExpr(xpos, ir.OCOMPLEX, l, r)
	ir.NodAt(xpos, ir.OEFACE, l, r)           -> ir.NewBinaryExpr(xpos, ir.OEFACE, l, r)
	ir.NodAt(xpos, ir.OADDR, l, nil)          -> ir.NewAddrExpr(xpos, l)
	ir.NodAt(xpos, ir.OADDSTR, nil, nil)      -> ir.NewAddStringExpr(xpos, nil)
	ir.NodAt(xpos, ir.OANDAND, l, r)          -> ir.NewLogicalExpr(xpos, ir.OANDAND, l, r)
	ir.NodAt(xpos, ir.OOROR, l, r)            -> ir.NewLogicalExpr(xpos, ir.OOROR, l, r)
	ir.NodAt(xpos, ir.OARRAYLIT, nil, nil)    -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, nil, nil)
	ir.NodAt(xpos, ir.OCOMPLIT, nil, nil)     -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, nil, nil)
	ir.NodAt(xpos, ir.OMAPLIT, nil, nil)      -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, nil, nil)
	ir.NodAt(xpos, ir.OSTRUCTLIT, nil, nil)   -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, nil, nil)
	ir.NodAt(xpos, ir.OSLICELIT, nil, nil)    -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, nil, nil)
	ir.NodAt(xpos, ir.OARRAYLIT, nil, r)      -> ir.NewCompLitExpr(xpos, ir.OARRAYLIT, r.(ir.Ntype), nil)
	ir.NodAt(xpos, ir.OCOMPLIT, nil, r)       -> ir.NewCompLitExpr(xpos, ir.OCOMPLIT, r.(ir.Ntype), nil)
	ir.NodAt(xpos, ir.OMAPLIT, nil, r)        -> ir.NewCompLitExpr(xpos, ir.OMAPLIT, r.(ir.Ntype), nil)
	ir.NodAt(xpos, ir.OSTRUCTLIT, nil, r)     -> ir.NewCompLitExpr(xpos, ir.OSTRUCTLIT, r.(ir.Ntype), nil)
	ir.NodAt(xpos, ir.OSLICELIT, nil, r)      -> ir.NewCompLitExpr(xpos, ir.OSLICELIT, r.(ir.Ntype), nil)
	ir.NodAt(xpos, ir.OAS2, nil, nil)         -> ir.NewAssignListStmt(xpos, ir.OAS2, nil, nil)
	ir.NodAt(xpos, ir.OAS2DOTTYPE, nil, nil)  -> ir.NewAssignListStmt(xpos, ir.OAS2DOTTYPE, nil, nil)
	ir.NodAt(xpos, ir.OAS2FUNC, nil, nil)     -> ir.NewAssignListStmt(xpos, ir.OAS2FUNC, nil, nil)
	ir.NodAt(xpos, ir.OAS2MAPR, nil, nil)     -> ir.NewAssignListStmt(xpos, ir.OAS2MAPR, nil, nil)
	ir.NodAt(xpos, ir.OAS2RECV, nil, nil)     -> ir.NewAssignListStmt(xpos, ir.OAS2RECV, nil, nil)
	ir.NodAt(xpos, ir.OSELRECV2, nil, nil)    -> ir.NewAssignListStmt(xpos, ir.OSELRECV2, nil, nil)
	ir.NodAt(xpos, ir.OASOP, l, r)            -> ir.NewAssignOpStmt(xpos, ir.OXXX, l, r)
	ir.NodAt(xpos, ir.OBITNOT, l, nil)        -> ir.NewUnaryExpr(xpos, ir.OBITNOT, l)
	ir.NodAt(xpos, ir.ONEG, l, nil)           -> ir.NewUnaryExpr(xpos, ir.ONEG, l)
	ir.NodAt(xpos, ir.ONOT, l, nil)           -> ir.NewUnaryExpr(xpos, ir.ONOT, l)
	ir.NodAt(xpos, ir.OPLUS, l, nil)          -> ir.NewUnaryExpr(xpos, ir.OPLUS, l)
	ir.NodAt(xpos, ir.ORECV, l, nil)          -> ir.NewUnaryExpr(xpos, ir.ORECV, l)
	ir.NodAt(xpos, ir.OALIGNOF, l, nil)       -> ir.NewUnaryExpr(xpos, ir.OALIGNOF, l)
	ir.NodAt(xpos, ir.OCAP, l, nil)           -> ir.NewUnaryExpr(xpos, ir.OCAP, l)
	ir.NodAt(xpos, ir.OCLOSE, l, nil)         -> ir.NewUnaryExpr(xpos, ir.OCLOSE, l)
	ir.NodAt(xpos, ir.OIMAG, l, nil)          -> ir.NewUnaryExpr(xpos, ir.OIMAG, l)
	ir.NodAt(xpos, ir.OLEN, l, nil)           -> ir.NewUnaryExpr(xpos, ir.OLEN, l)
	ir.NodAt(xpos, ir.ONEW, l, nil)           -> ir.NewUnaryExpr(xpos, ir.ONEW, l)
	ir.NodAt(xpos, ir.ONEWOBJ, l, nil)        -> ir.NewUnaryExpr(xpos, ir.ONEWOBJ, l)
	ir.NodAt(xpos, ir.OOFFSETOF, l, nil)      -> ir.NewUnaryExpr(xpos, ir.OOFFSETOF, l)
	ir.NodAt(xpos, ir.OPANIC, l, nil)         -> ir.NewUnaryExpr(xpos, ir.OPANIC, l)
	ir.NodAt(xpos, ir.OREAL, l, nil)          -> ir.NewUnaryExpr(xpos, ir.OREAL, l)
	ir.NodAt(xpos, ir.OSIZEOF, l, nil)        -> ir.NewUnaryExpr(xpos, ir.OSIZEOF, l)
	ir.NodAt(xpos, ir.OCHECKNIL, l, nil)      -> ir.NewUnaryExpr(xpos, ir.OCHECKNIL, l)
	ir.NodAt(xpos, ir.OCFUNC, l, nil)         -> ir.NewUnaryExpr(xpos, ir.OCFUNC, l)
	ir.NodAt(xpos, ir.OIDATA, l, nil)         -> ir.NewUnaryExpr(xpos, ir.OIDATA, l)
	ir.NodAt(xpos, ir.OITAB, l, nil)          -> ir.NewUnaryExpr(xpos, ir.OITAB, l)
	ir.NodAt(xpos, ir.OSPTR, l, nil)          -> ir.NewUnaryExpr(xpos, ir.OSPTR, l)
	ir.NodAt(xpos, ir.OVARDEF, l, nil)        -> ir.NewUnaryExpr(xpos, ir.OVARDEF, l)
	ir.NodAt(xpos, ir.OVARKILL, l, nil)       -> ir.NewUnaryExpr(xpos, ir.OVARKILL, l)
	ir.NodAt(xpos, ir.OVARLIVE, l, nil)       -> ir.NewUnaryExpr(xpos, ir.OVARLIVE, l)
	ir.NodAt(xpos, ir.OBLOCK, nil, nil)       -> ir.NewBlockStmt(xpos, nil)
	ir.NodAt(xpos, ir.OBREAK, nil, nil)       -> ir.NewBranchStmt(xpos, ir.OBREAK, nil)
	ir.NodAt(xpos, ir.OCONTINUE, nil, nil)    -> ir.NewBranchStmt(xpos, ir.OCONTINUE, nil)
	ir.NodAt(xpos, ir.OFALL, nil, nil)        -> ir.NewBranchStmt(xpos, ir.OFALL, nil)
	ir.NodAt(xpos, ir.OGOTO, nil, nil)        -> ir.NewBranchStmt(xpos, ir.OGOTO, nil)
	ir.NodAt(xpos, ir.ORETJMP, nil, nil)      -> ir.NewBranchStmt(xpos, ir.ORETJMP, nil)
	ir.NodAt(xpos, ir.OCALL, l, nil)          -> ir.NewCallExpr(xpos, ir.OCALL, l, nil)
	ir.NodAt(xpos, ir.OCALLFUNC, l, nil)      -> ir.NewCallExpr(xpos, ir.OCALLFUNC, l, nil)
	ir.NodAt(xpos, ir.OCALLINTER, l, nil)     -> ir.NewCallExpr(xpos, ir.OCALLINTER, l, nil)
	ir.NodAt(xpos, ir.OCALLMETH, l, nil)      -> ir.NewCallExpr(xpos, ir.OCALLMETH, l, nil)
	ir.NodAt(xpos, ir.OAPPEND, l, nil)        -> ir.NewCallExpr(xpos, ir.OAPPEND, l, nil)
	ir.NodAt(xpos, ir.ODELETE, l, nil)        -> ir.NewCallExpr(xpos, ir.ODELETE, l, nil)
	ir.NodAt(xpos, ir.OGETG, l, nil)          -> ir.NewCallExpr(xpos, ir.OGETG, l, nil)
	ir.NodAt(xpos, ir.OMAKE, l, nil)          -> ir.NewCallExpr(xpos, ir.OMAKE, l, nil)
	ir.NodAt(xpos, ir.OPRINT, l, nil)         -> ir.NewCallExpr(xpos, ir.OPRINT, l, nil)
	ir.NodAt(xpos, ir.OPRINTN, l, nil)        -> ir.NewCallExpr(xpos, ir.OPRINTN, l, nil)
	ir.NodAt(xpos, ir.ORECOVER, l, nil)       -> ir.NewCallExpr(xpos, ir.ORECOVER, l, nil)
	ir.NodAt(xpos, ir.OCASE, nil, nil)        -> ir.NewCaseStmt(xpos, nil, nil)
	ir.NodAt(xpos, ir.OCONV, l, nil)          -> ir.NewConvExpr(xpos, ir.OCONV, nil, l)
	ir.NodAt(xpos, ir.OCONVIFACE, l, nil)     -> ir.NewConvExpr(xpos, ir.OCONVIFACE, nil, l)
	ir.NodAt(xpos, ir.OCONVNOP, l, nil)       -> ir.NewConvExpr(xpos, ir.OCONVNOP, nil, l)
	ir.NodAt(xpos, ir.ORUNESTR, l, nil)       -> ir.NewConvExpr(xpos, ir.ORUNESTR, nil, l)
	ir.NodAt(xpos, ir.ODCL, l, nil)           -> ir.NewDecl(xpos, ir.ODCL, l)
	ir.NodAt(xpos, ir.ODCLCONST, l, nil)      -> ir.NewDecl(xpos, ir.ODCLCONST, l)
	ir.NodAt(xpos, ir.ODCLTYPE, l, nil)       -> ir.NewDecl(xpos, ir.ODCLTYPE, l)
	ir.NodAt(xpos, ir.ODCLFUNC, nil, nil)     -> ir.NewFunc(xpos)
	ir.NodAt(xpos, ir.ODEFER, l, nil)         -> ir.NewGoDeferStmt(xpos, ir.ODEFER, l)
	ir.NodAt(xpos, ir.OGO, l, nil)            -> ir.NewGoDeferStmt(xpos, ir.OGO, l)
	ir.NodAt(xpos, ir.ODEREF, l, nil)         -> ir.NewStarExpr(xpos, l)
	ir.NodAt(xpos, ir.ODOT, l, nil)           -> ir.NewSelectorExpr(xpos, ir.ODOT, l, nil)
	ir.NodAt(xpos, ir.ODOTPTR, l, nil)        -> ir.NewSelectorExpr(xpos, ir.ODOTPTR, l, nil)
	ir.NodAt(xpos, ir.ODOTMETH, l, nil)       -> ir.NewSelectorExpr(xpos, ir.ODOTMETH, l, nil)
	ir.NodAt(xpos, ir.ODOTINTER, l, nil)      -> ir.NewSelectorExpr(xpos, ir.ODOTINTER, l, nil)
	ir.NodAt(xpos, ir.OXDOT, l, nil)          -> ir.NewSelectorExpr(xpos, ir.OXDOT, l, nil)
	ir.NodAt(xpos, ir.ODOTTYPE, l, nil)       -> ir.NewTypeAssertExpr(xpos, l, nil)
	ir.NodAt(xpos, ir.ODOTTYPE, l, r)         -> ir.NewTypeAssertExpr(xpos, l, r.(ir.Ntype))
	ir.NodAt(xpos, ir.OFOR, l, r)             -> ir.NewForStmt(xpos, nil, l, r, nil)
	ir.NodAt(xpos, ir.OINDEX, l, r)           -> ir.NewIndexExpr(xpos, l, r)
	ir.NodAt(xpos, ir.OINLMARK, nil, nil)     -> ir.NewInlineMarkStmt(xpos, types.BADWIDTH)
	ir.NodAt(xpos, ir.OKEY, l, r)             -> ir.NewKeyExpr(xpos, l, r)
	ir.NodAt(xpos, ir.OLABEL, nil, nil)       -> ir.NewLabelStmt(xpos, nil)
	ir.NodAt(xpos, ir.OMAKECHAN, l, r)        -> ir.NewMakeExpr(xpos, ir.OMAKECHAN, l, r)
	ir.NodAt(xpos, ir.OMAKEMAP, l, r)         -> ir.NewMakeExpr(xpos, ir.OMAKEMAP, l, r)
	ir.NodAt(xpos, ir.OMAKESLICE, l, r)       -> ir.NewMakeExpr(xpos, ir.OMAKESLICE, l, r)
	ir.NodAt(xpos, ir.OMAKESLICECOPY, l, r)   -> ir.NewMakeExpr(xpos, ir.OMAKESLICECOPY, l, r)
	ir.NodAt(xpos, ir.ONIL, nil, nil)         -> ir.NewNilExpr(xpos)
	ir.NodAt(xpos, ir.OPACK, nil, nil)        -> ir.NewPkgName(xpos, nil, nil)
	ir.NodAt(xpos, ir.OPAREN, l, nil)         -> ir.NewParenExpr(xpos, l)
	ir.NodAt(xpos, ir.ORANGE, nil, r)         -> ir.NewRangeStmt(xpos, nil, r, nil)
	ir.NodAt(xpos, ir.ORESULT, nil, nil)      -> ir.NewResultExpr(xpos, nil, types.BADWIDTH)
	ir.NodAt(xpos, ir.ORETURN, nil, nil)      -> ir.NewReturnStmt(xpos, nil)
	ir.NodAt(xpos, ir.OSELECT, nil, nil)      -> ir.NewSelectStmt(xpos, nil)
	ir.NodAt(xpos, ir.OSEND, l, r)            -> ir.NewSendStmt(xpos, l, r)
	ir.NodAt(xpos, ir.OSLICE, l, nil)         -> ir.NewSliceExpr(xpos, ir.OSLICE, l)
	ir.NodAt(xpos, ir.OSLICEARR, l, nil)      -> ir.NewSliceExpr(xpos, ir.OSLICEARR, l)
	ir.NodAt(xpos, ir.OSLICESTR, l, nil)      -> ir.NewSliceExpr(xpos, ir.OSLICESTR, l)
	ir.NodAt(xpos, ir.OSLICE3, l, nil)        -> ir.NewSliceExpr(xpos, ir.OSLICE3, l)
	ir.NodAt(xpos, ir.OSLICE3ARR, l, nil)     -> ir.NewSliceExpr(xpos, ir.OSLICE3ARR, l)
	ir.NodAt(xpos, ir.OSLICEHEADER, l, nil)   -> ir.NewSliceHeaderExpr(xpos, nil, l, nil, nil)
	ir.NodAt(xpos, ir.OSWITCH, l, nil)        -> ir.NewSwitchStmt(xpos, l, nil)
	ir.NodAt(xpos, ir.OINLCALL, nil, nil)     -> ir.NewInlinedCallExpr(xpos, nil, nil)
}

rm noder.nod noder.nodSym nodSym nodlSym ir.NodAt ir.Nod
'

Change-Id: Ibf1eb708de8463ae74ccc47d7966cc263a18295e
Reviewed-on: https://go-review.googlesource.com/c/go/+/277933
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 06:37:28 +00:00

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"fmt"
	"sort"
)

// AlgKind describes the kind of algorithms used for comparing and
// hashing a Type.
type AlgKind int

//go:generate stringer -type AlgKind -trimprefix A

const (
	// These values are known by runtime.
	ANOEQ AlgKind = iota
	AMEM0
	AMEM8
	AMEM16
	AMEM32
	AMEM64
	AMEM128
	ASTRING
	AINTER
	ANILINTER
	AFLOAT32
	AFLOAT64
	ACPLX64
	ACPLX128

	// Type can be compared/hashed as regular memory.
	AMEM AlgKind = 100

	// Type needs special comparison/hashing functions.
	ASPECIAL AlgKind = -1
)

// IsComparable reports whether t is a comparable type.
func IsComparable(t *types.Type) bool {
	a, _ := algtype1(t)
	return a != ANOEQ
}

// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
	a, _ := algtype1(t)
	return a == AMEM
}

// IncomparableField returns an incomparable Field of struct Type t, if any.
func IncomparableField(t *types.Type) *types.Field {
	for _, f := range t.FieldSlice() {
		if !IsComparable(f.Type) {
			return f
		}
	}
	return nil
}

// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
	switch t.Kind() {
	default:
		return false
	case types.TINTER:
		return true
	case types.TARRAY:
		return EqCanPanic(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
				return true
			}
		}
		return false
	}
}

// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
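// For example, a comparable type that is plain memory and exactly 16
// bytes wide is reported as AMEM128 rather than AMEM.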
func algtype(t *types.Type) AlgKind {
	a, _ := algtype1(t)
	if a == AMEM {
		switch t.Width {
		case 0:
			return AMEM0
		case 1:
			return AMEM8
		case 2:
			return AMEM16
		case 4:
			return AMEM32
		case 8:
			return AMEM64
		case 16:
			return AMEM128
		}
	}
	return a
}

// algtype1 returns the AlgKind used for comparing and hashing Type t.
// If it returns ANOEQ, it also returns the component type of t that
// makes it incomparable.
func algtype1(t *types.Type) (AlgKind, *types.Type) {
	if t.Broke() {
		return AMEM, nil
	}
	if t.Noalg() {
		return ANOEQ, t
	}

	switch t.Kind() {
	case types.TANY, types.TFORW:
		// will be defined later.
		return ANOEQ, t

	case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16,
		types.TINT32, types.TUINT32, types.TINT64, types.TUINT64,
		types.TINT, types.TUINT, types.TUINTPTR,
		types.TBOOL, types.TPTR,
		types.TCHAN, types.TUNSAFEPTR:
		return AMEM, nil

	case types.TFUNC, types.TMAP:
		return ANOEQ, t

	case types.TFLOAT32:
		return AFLOAT32, nil

	case types.TFLOAT64:
		return AFLOAT64, nil

	case types.TCOMPLEX64:
		return ACPLX64, nil

	case types.TCOMPLEX128:
		return ACPLX128, nil

	case types.TSTRING:
		return ASTRING, nil

	case types.TINTER:
		if t.IsEmptyInterface() {
			return ANILINTER, nil
		}
		return AINTER, nil

	case types.TSLICE:
		return ANOEQ, t

	case types.TARRAY:
		a, bad := algtype1(t.Elem())
		switch a {
		case AMEM:
			return AMEM, nil
		case ANOEQ:
			return ANOEQ, bad
		}

		switch t.NumElem() {
		case 0:
			// We checked above that the element type is comparable.
			return AMEM, nil
		case 1:
			// Single-element array is same as its lone element.
			return a, nil
		}

		return ASPECIAL, nil

	case types.TSTRUCT:
		fields := t.FieldSlice()

		// One-field struct is same as that one field alone.
		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
			return algtype1(fields[0].Type)
		}

		ret := AMEM
		for i, f := range fields {
			// All fields must be comparable.
			a, bad := algtype1(f.Type)
			if a == ANOEQ {
				return ANOEQ, bad
			}

			// Blank fields, padded fields, fields with non-memory
			// equality need special compare.
			if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
				ret = ASPECIAL
			}
		}

		return ret, nil
	}

	base.Fatalf("algtype1: unexpected type %v", t)
	return 0, nil
}

// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
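// As an illustrative sketch only (T is a hypothetical type, not taken
// from this file), for
//
//	type T struct { a string; b, c int64 }
//
// the generated function looks roughly like
//
//	func T.hash(p *T, h uintptr) uintptr {
//		h = strhash(&p.a, h)
//		h = memhash(&p.b, h, 16)
//		return h
//	}
//
// hashing the string field with its specific hash function and the
// run of plain-memory fields with a single memhash call.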
func genhash(t *types.Type) *obj.LSym {
	switch algtype(t) {
	default:
		// genhash is only called for types that have equality
		base.Fatalf("genhash %v", t)
	case AMEM0:
		return sysClosure("memhash0")
	case AMEM8:
		return sysClosure("memhash8")
	case AMEM16:
		return sysClosure("memhash16")
	case AMEM32:
		return sysClosure("memhash32")
	case AMEM64:
		return sysClosure("memhash64")
	case AMEM128:
		return sysClosure("memhash128")
	case ASTRING:
		return sysClosure("strhash")
	case AINTER:
		return sysClosure("interhash")
	case ANILINTER:
		return sysClosure("nilinterhash")
	case AFLOAT32:
		return sysClosure("f32hash")
	case AFLOAT64:
		return sysClosure("f64hash")
	case ACPLX64:
		return sysClosure("c64hash")
	case ACPLX128:
		return sysClosure("c128hash")
	case AMEM:
		// For other sizes of plain memory, we build a closure
		// that calls memhash_varlen. The size of the memory is
		// encoded in the first slot of the closure.
		closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
		if len(closure.P) > 0 { // already generated
			return closure
		}
		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
		}
		ot := 0
		ot = dsymptr(closure, ot, memhashvarlen, 0)
		ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case ASPECIAL:
		break
	}

	closure := typesymprefix(".hashfunc", t).Linksym()
	if len(closure.P) > 0 { // already generated
		return closure
	}

	// Generate hash functions for subtypes.
	// There are cases where we might not use these hashes,
	// but in that case they will get dead-code eliminated.
	// (And the closure generated by genhash will also get
	// dead-code eliminated, as we call the subtype hashers
	// directly.)
	switch t.Kind() {
	case types.TARRAY:
		genhash(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			genhash(f.Type)
		}
	}

	sym := typesymprefix(".hash", t)
	if base.Flag.LowerR != 0 {
		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
	}

	base.Pos = autogeneratedPos // less confusing than end of input
	dclcontext = ir.PEXTERN

	// func sym(p *T, h uintptr) uintptr
	args := []*ir.Field{
		namedfield("p", types.NewPtr(t)),
		namedfield("h", types.Types[types.TUINTPTR]),
	}
	results := []*ir.Field{anonfield(types.Types[types.TUINTPTR])}
	tfn := ir.NewFuncType(base.Pos, nil, args, results)

	fn := dclfunc(sym, tfn)
	np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
	nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)

	switch t.Kind() {
	case types.TARRAY:
		// An array of pure memory would be handled by the
		// standard algorithm, so the element type must not be
		// pure memory.
		hashel := hashfor(t.Elem())

		// for i := 0; i < nelem; i++
		ni := temp(types.Types[types.TINT])
		init := ir.NewAssignStmt(base.Pos, ni, nodintconst(0))
		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, nodintconst(t.NumElem()))
		post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, nodintconst(1)))
		loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
		loop.PtrInit().Append(init)

		// h = hashel(&p[i], h)
		call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)

		nx := ir.NewIndexExpr(base.Pos, np, ni)
		nx.SetBounded(true)
		na := nodAddr(nx)
		call.PtrList().Append(na)
		call.PtrList().Append(nh)
		loop.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))

		fn.PtrBody().Append(loop)

	case types.TSTRUCT:
		// Walk the struct using memhash for runs of AMEM
		// and calling specific hash functions for the others.
		for i, fields := 0, t.FieldSlice(); i < len(fields); {
			f := fields[i]

			// Skip blank fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Hash non-memory fields with appropriate hash function.
			if !IsRegularMemory(f.Type) {
				hashel := hashfor(f.Type)
				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
				nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
				na := nodAddr(nx)
				call.PtrList().Append(na)
				call.PtrList().Append(nh)
				fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))
				i++
				continue
			}

			// Otherwise, hash a maximal length run of raw memory.
			size, next := memrun(t, i)

			// h = hashel(&p.first, size, h)
			hashel := hashmem(f.Type)
			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
			nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
			na := nodAddr(nx)
			call.PtrList().Append(na)
			call.PtrList().Append(nh)
			call.PtrList().Append(nodintconst(size))
			fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nh, call))

			i = next
		}
	}

	r := ir.NewReturnStmt(base.Pos, nil)
	r.PtrList().Append(nh)
	fn.PtrBody().Append(r)

	if base.Flag.LowerR != 0 {
		ir.DumpList("genhash body", fn.Body())
	}

	funcbody()

	fn.SetDupok(true)
	typecheckFunc(fn)

	Curfn = fn
	typecheckslice(fn.Body().Slice(), ctxStmt)
	Curfn = nil

	if base.Debug.DclStack != 0 {
		testdclstack()
	}

	fn.SetNilCheckDisabled(true)
	Target.Decls = append(Target.Decls, fn)

	// Build closure. It doesn't close over any variables, so
	// it contains just the function pointer.
	dsymptr(closure, 0, sym.Linksym(), 0)
	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
	return closure
}

func hashfor(t *types.Type) ir.Node {
	var sym *types.Sym

	switch a, _ := algtype1(t); a {
	case AMEM:
		base.Fatalf("hashfor with AMEM type")
	case AINTER:
		sym = Runtimepkg.Lookup("interhash")
	case ANILINTER:
		sym = Runtimepkg.Lookup("nilinterhash")
	case ASTRING:
		sym = Runtimepkg.Lookup("strhash")
	case AFLOAT32:
		sym = Runtimepkg.Lookup("f32hash")
	case AFLOAT64:
		sym = Runtimepkg.Lookup("f64hash")
	case ACPLX64:
		sym = Runtimepkg.Lookup("c64hash")
	case ACPLX128:
		sym = Runtimepkg.Lookup("c128hash")
	default:
		// Note: the caller of hashfor ensured that this symbol
		// exists and has a body by calling genhash for t.
		sym = typesymprefix(".hash", t)
	}

	n := NewName(sym)
	setNodeNameFunc(n)
	n.SetType(functype(nil, []*ir.Field{
		anonfield(types.NewPtr(t)),
		anonfield(types.Types[types.TUINTPTR]),
	}, []*ir.Field{
		anonfield(types.Types[types.TUINTPTR]),
	}))
	return n
}

// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
	s := sysvar(name + "·f")
	if len(s.P) == 0 {
		f := sysfunc(name)
		dsymptr(s, 0, f, 0)
		ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}
	return s
}

// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
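// The overall shape of the generated function, shown here only as an
// illustrative sketch (condN stands for the per-field or per-element
// comparisons built below), is roughly
//
//	func T.eq(p, q *T) (r bool) {
//		if cond1 {
//		} else {
//			goto neq
//		}
//		...
//		r = condN
//	ret:
//		return
//	neq:
//		r = false
//		goto ret // or a plain return, when the epilogue is small
//	}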
func geneq(t *types.Type) *obj.LSym {
	switch algtype(t) {
	case ANOEQ:
		// The runtime will panic if it tries to compare
		// a type with a nil equality function.
		return nil
	case AMEM0:
		return sysClosure("memequal0")
	case AMEM8:
		return sysClosure("memequal8")
	case AMEM16:
		return sysClosure("memequal16")
	case AMEM32:
		return sysClosure("memequal32")
	case AMEM64:
		return sysClosure("memequal64")
	case AMEM128:
		return sysClosure("memequal128")
	case ASTRING:
		return sysClosure("strequal")
	case AINTER:
		return sysClosure("interequal")
	case ANILINTER:
		return sysClosure("nilinterequal")
	case AFLOAT32:
		return sysClosure("f32equal")
	case AFLOAT64:
		return sysClosure("f64equal")
	case ACPLX64:
		return sysClosure("c64equal")
	case ACPLX128:
		return sysClosure("c128equal")
	case AMEM:
		// make equality closure. The size of the type
		// is encoded in the closure.
		closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
		if len(closure.P) != 0 {
			return closure
		}
		if memequalvarlen == nil {
			memequalvarlen = sysvar("memequal_varlen") // asm func
		}
		ot := 0
		ot = dsymptr(closure, ot, memequalvarlen, 0)
		ot = duintptr(closure, ot, uint64(t.Width))
		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case ASPECIAL:
		break
	}

	closure := typesymprefix(".eqfunc", t).Linksym()
	if len(closure.P) > 0 { // already generated
		return closure
	}
	sym := typesymprefix(".eq", t)
	if base.Flag.LowerR != 0 {
		fmt.Printf("geneq %v\n", t)
	}

	// Autogenerate code for equality of structs and arrays.

	base.Pos = autogeneratedPos // less confusing than end of input
	dclcontext = ir.PEXTERN

	// func sym(p, q *T) bool
	tfn := ir.NewFuncType(base.Pos, nil,
		[]*ir.Field{namedfield("p", types.NewPtr(t)), namedfield("q", types.NewPtr(t))},
		[]*ir.Field{namedfield("r", types.Types[types.TBOOL])})

	fn := dclfunc(sym, tfn)
	np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
	nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
	nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)

	// Label to jump to if an equality test fails.
	neq := autolabel(".neq")

	// We reach here only for types that have equality but
	// cannot be handled by the standard algorithms,
	// so t must be either an array or a struct.
	switch t.Kind() {
	default:
		base.Fatalf("geneq %v", t)

	case types.TARRAY:
		nelem := t.NumElem()

		// checkAll generates code to check the equality of all array elements.
		// If unroll is greater than nelem, checkAll generates:
		//
		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
		// } else {
		//   return
		// }
		//
		// And so on.
		//
		// Otherwise it generates:
		//
		// for i := 0; i < nelem; i++ {
		//   if eq(p[i], q[i]) {
		//   } else {
		//     goto neq
		//   }
		// }
		//
		// TODO(josharian): consider doing some loop unrolling
		// for larger nelem as well, processing a few elements at a time in a loop.
		checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
			// checkIdx generates a node to check for equality at index i.
			checkIdx := func(i ir.Node) ir.Node {
				// pi := p[i]
				pi := ir.NewIndexExpr(base.Pos, np, i)
				pi.SetBounded(true)
				pi.SetType(t.Elem())
				// qi := q[i]
				qi := ir.NewIndexExpr(base.Pos, nq, i)
				qi.SetBounded(true)
				qi.SetType(t.Elem())
				return eq(pi, qi)
			}

			if nelem <= unroll {
				if last {
					// Do last comparison in a different manner.
					nelem--
				}
				// Generate a series of checks.
				for i := int64(0); i < nelem; i++ {
					// if check {} else { goto neq }
					nif := ir.NewIfStmt(base.Pos, checkIdx(nodintconst(i)), nil, nil)
					nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
					fn.PtrBody().Append(nif)
				}
				if last {
					fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(nodintconst(nelem))))
				}
			} else {
				// Generate a for loop.
				// for i := 0; i < nelem; i++
				i := temp(types.Types[types.TINT])
				init := ir.NewAssignStmt(base.Pos, i, nodintconst(0))
				cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, nodintconst(nelem))
				post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, nodintconst(1)))
				loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
				loop.PtrInit().Append(init)
				// if eq(pi, qi) {} else { goto neq }
				nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
				nif.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
				loop.PtrBody().Append(nif)
				fn.PtrBody().Append(loop)
				if last {
					fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
				}
			}
		}

		switch t.Elem().Kind() {
		case types.TSTRING:
			// Do two loops. First, check that all the lengths match (cheap).
			// Second, check that all the contents match (expensive).
			// TODO: when the array size is small, unroll the length match checks.
			checkAll(3, false, func(pi, qi ir.Node) ir.Node {
				// Compare lengths.
				eqlen, _ := eqstring(pi, qi)
				return eqlen
			})
			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
				// Compare contents.
				_, eqmem := eqstring(pi, qi)
				return eqmem
			})
		case types.TFLOAT32, types.TFLOAT64:
			checkAll(2, true, func(pi, qi ir.Node) ir.Node {
				// p[i] == q[i]
				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
			})
		// TODO: pick apart structs, do them piecemeal too
		default:
			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
				// p[i] == q[i]
				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
			})
		}

	case types.TSTRUCT:
		// Build a list of conditions to satisfy.
		// The conditions are a list-of-lists. Conditions are reorderable
		// within each inner list. The outer lists must be evaluated in order.
		var conds [][]ir.Node
		conds = append(conds, []ir.Node{})
		and := func(n ir.Node) {
			i := len(conds) - 1
			conds[i] = append(conds[i], n)
		}

		// Walk the struct using memequal for runs of AMEM
		// and calling specific equality tests for the others.
		for i, fields := 0, t.FieldSlice(); i < len(fields); {
			f := fields[i]

			// Skip blank-named fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Compare non-memory fields with field equality.
			if !IsRegularMemory(f.Type) {
				if EqCanPanic(f.Type) {
					// Enforce ordering by starting a new set of reorderable conditions.
					conds = append(conds, []ir.Node{})
				}
				p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
				q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
				switch {
				case f.Type.IsString():
					eqlen, eqmem := eqstring(p, q)
					and(eqlen)
					and(eqmem)
				default:
					and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
				}
				if EqCanPanic(f.Type) {
					// Also enforce ordering after something that can panic.
					conds = append(conds, []ir.Node{})
				}
				i++
				continue
			}

			// Find maximal length run of memory-only fields.
			size, next := memrun(t, i)

			// TODO(rsc): All the calls to newname are wrong for
			// cross-package unexported fields.
			if s := fields[i:next]; len(s) <= 2 {
				// Two or fewer fields: use plain field equality.
				for _, f := range s {
					and(eqfield(np, nq, f.Sym))
				}
			} else {
				// More than two fields: use memequal.
				and(eqmem(np, nq, f.Sym, size))
			}
			i = next
		}

		// Sort conditions to put runtime calls last.
		// Preserve the rest of the ordering.
		var flatConds []ir.Node
		for _, c := range conds {
			isCall := func(n ir.Node) bool {
				return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
			}
			sort.SliceStable(c, func(i, j int) bool {
				return !isCall(c[i]) && isCall(c[j])
			})
			flatConds = append(flatConds, c...)
		}

		if len(flatConds) == 0 {
			fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(true)))
		} else {
			for _, c := range flatConds[:len(flatConds)-1] {
				// if cond {} else { goto neq }
				n := ir.NewIfStmt(base.Pos, c, nil, nil)
				n.PtrRlist().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
				fn.PtrBody().Append(n)
			}
			fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
		}
	}

	// ret:
	//   return
	ret := autolabel(".ret")
	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, ret))
	fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))

	// neq:
	//   r = false
	//   return (or goto ret)
	fn.PtrBody().Append(ir.NewLabelStmt(base.Pos, neq))
	fn.PtrBody().Append(ir.NewAssignStmt(base.Pos, nr, nodbool(false)))
	if EqCanPanic(t) || anyCall(fn) {
		// Epilogue is large, so share it with the equal case.
		fn.PtrBody().Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
	} else {
		// Epilogue is small, so don't bother sharing.
		fn.PtrBody().Append(ir.NewReturnStmt(base.Pos, nil))
	}
	// TODO(khr): the epilogue size detection condition above isn't perfect.
	// We should really do a generic CL that shares epilogues across
	// the board. See #24936.

	if base.Flag.LowerR != 0 {
		ir.DumpList("geneq body", fn.Body())
	}

	funcbody()

	fn.SetDupok(true)
	typecheckFunc(fn)

	Curfn = fn
	typecheckslice(fn.Body().Slice(), ctxStmt)
	Curfn = nil

	if base.Debug.DclStack != 0 {
		testdclstack()
	}

	// Disable checknils while compiling this code.
	// We are comparing a struct or an array,
	// neither of which can be nil, and our comparisons
	// are shallow.
	fn.SetNilCheckDisabled(true)
	Target.Decls = append(Target.Decls, fn)

	// Generate a closure which points at the function we just generated.
	dsymptr(closure, 0, sym.Linksym(), 0)
	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
	return closure
}

func anyCall(fn *ir.Func) bool {
	return ir.Any(fn, func(n ir.Node) bool {
		// TODO(rsc): No methods?
		op := n.Op()
		return op == ir.OCALL || op == ir.OCALLFUNC
	})
}

// eqfield returns the node
//	p.field == q.field
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
	ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
	return ne
}

// eqstring returns the nodes
//   len(s) == len(t)
// and
//   memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
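// As a sketch of the intended use, callers combine the two results as
//
//	len(s) == len(t) && memequal(s.ptr, t.ptr, len(s))
//
// so that memequal is only reached when the lengths are already known
// to be equal.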
func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
	s = conv(s, types.Types[types.TSTRING])
	t = conv(t, types.Types[types.TSTRING])
	sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
	tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
	slen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
	tlen := conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])

	fn := syslook("memequal")
	fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
	TypecheckCall(call)

	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
	cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
	cmp.SetType(types.Types[types.TBOOL])
	return cmp, call
}

// eqinterface returns the nodes
//   s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
//   ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
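// As a sketch of the intended use, callers combine the two results as
//
//	s.tab == t.tab && ifaceeq(s.tab, s.data, t.data)
//
// so that the data words are only compared when the dynamic types match.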
func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
	if !types.Identical(s.Type(), t.Type()) {
		base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
	}
	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
	var fn ir.Node
	if s.Type().IsEmptyInterface() {
		fn = syslook("efaceeq")
	} else {
		fn = syslook("ifaceeq")
	}

	stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
	ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
	sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
	tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
	sdata.SetType(types.Types[types.TUNSAFEPTR])
	tdata.SetType(types.Types[types.TUNSAFEPTR])
	sdata.SetTypecheck(1)
	tdata.SetTypecheck(1)

	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
	TypecheckCall(call)

	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
	cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
	cmp.SetType(types.Types[types.TBOOL])
	return cmp, call
}

// eqmem returns the node
//	memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
	nx := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)), ctxExpr)
	ny := typecheck(nodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)), ctxExpr)

	fn, needsize := eqmemfunc(size, nx.Type().Elem())
	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
	call.PtrList().Append(nx)
	call.PtrList().Append(ny)
	if needsize {
		call.PtrList().Append(nodintconst(size))
	}

	return call
}

func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
	switch size {
	default:
		fn = syslook("memequal")
		needsize = true
	case 1, 2, 4, 8, 16:
		buf := fmt.Sprintf("memequal%d", int(size)*8)
		fn = syslook(buf)
	}

	fn = substArgTypes(fn, t, t)
	return fn, needsize
}

// memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
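// For example (an illustrative layout, assuming 4-byte int32 fields
// with no padding before c), given
//
//	struct { a, b int32; c string }
//
// memrun(t, 0) covers a and b and returns size 8, next 2, because the
// string field c cannot be compared as plain memory.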
func memrun(t *types.Type, start int) (size int64, next int) {
	next = start
	for {
		next++
		if next == t.NumFields() {
			break
		}
		// Stop run after a padded field.
		if ispaddedfield(t, next-1) {
			break
		}
		// Also, stop before a blank or non-memory field.
		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
			break
		}
	}
	return t.Field(next-1).End() - t.Field(start).Offset, next
}

// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
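// For example (illustrative, assuming int64 is 8-byte aligned), in
//
//	struct { a int8; b int64 }
//
// field a is followed by 7 bytes of padding, so ispaddedfield(t, 0)
// reports true.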
func ispaddedfield(t *types.Type, i int) bool {
	if !t.IsStruct() {
		base.Fatalf("ispaddedfield called non-struct %v", t)
	}
	end := t.Width
	if i+1 < t.NumFields() {
		end = t.Field(i + 1).Offset
	}
	return t.Field(i).End() != end
}