[dev.regabi] cmd/compile: split out package reflectdata [generated]

[git-generate]

cd src/cmd/compile/internal/gc
rf '
	ex {
		import "cmd/compile/internal/base"
		thearch.LinkArch.Name -> base.Ctxt.Arch.Name
	}

	# Move out of reflect.go a few functions that should stay.
	mv addsignats obj.go
	mv deferstruct ssa.go

	# Export reflectdata API.
	mv zerosize ZeroSize
	mv hmap MapType
	mv bmap MapBucketType
	mv hiter MapIterType
	mv addsignat NeedRuntimeType
	mv typename TypePtr
	mv typenamesym TypeSym
	mv typesymprefix TypeSymPrefix
	mv itabsym ITabSym
	mv tracksym TrackSym
	mv zeroaddr ZeroAddr
	mv itabname ITabAddr
	mv ifaceMethodOffset InterfaceMethodOffset
	mv peekitabs CompileITabs
	mv addptabs CollectPTabs
	mv algtype AlgType
	mv dtypesym WriteType
	mv dumpbasictypes WriteBasicTypes
	mv dumpimportstrings WriteImportStrings
	mv dumpsignats WriteRuntimeTypes
	mv dumptabs WriteTabs
	mv eqinterface EqInterface
	mv eqstring EqString

	mv GCProg gcProg
	mv EqCanPanic eqCanPanic
	mv IsRegularMemory isRegularMemory
	mv Sig typeSig

	mv hashmem alg.go
	mv CollectPTabs genwrapper ZeroSize reflect.go
	mv alg.go reflect.go cmd/compile/internal/reflectdata
'
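
Background for readers: rf is the refactoring tool used to generate
these CLs. An "ex { ... }" block rewrites every expression in the
package that matches the example pattern, and "mv old new" renames a
declaration, moving it between files or packages when the destination
names one. The ex block above removes the moved code's dependence on
gc's thearch global; roughly, its effect at a call site (see the
ssa.go hunk below) is:

	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, ...)  // before
	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, ...)    // after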

Change-Id: Iaae9da9e9fad5f772f5216004823ccff2ea8f139
Reviewed-on: https://go-review.googlesource.com/c/go/+/279475
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Author: Russ Cox <rsc@golang.org>
Date:   2020-12-23 00:55:38 -05:00
Commit: de65151e50 (parent: 4dfb5d91a8)

13 changed files with 418 additions and 406 deletions
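
Most of the diff below is mechanical: the unexported gc helpers become
exported reflectdata API per the mv list in the message above, and each
call site gains a reflectdata qualifier. A representative before/after
pair, taken from the order.go hunk:

	n.Prealloc = o.newTemp(hiter(n.Type()), true)                   // before
	n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true) // after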

src/cmd/compile/internal/gc/abiutils_test.go

@@ -7,6 +7,7 @@ package gc
 import (
 	"bufio"
 	"cmd/compile/internal/base"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -38,10 +39,10 @@ func TestMain(m *testing.M) {
 	types.PtrSize = thearch.LinkArch.PtrSize
 	types.RegSize = thearch.LinkArch.RegSize
 	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
+		return reflectdata.TypeSym(t).Linksym()
 	}
 	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
+		return reflectdata.TypeSym(t).Linksym()
 	}
 	typecheck.Init()
 	os.Exit(m.Run())

src/cmd/compile/internal/gc/go.go

@@ -12,8 +12,6 @@ import (
 
 var pragcgobuf [][]string
 
-var zerosize int64
-
 // interface to back end
 
 type Arch struct {
src/cmd/compile/internal/gc/main.go

@@ -15,6 +15,7 @@ import (
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/noder"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
@@ -190,19 +191,19 @@ func Main(archInit func(*Arch)) {
 	types.RegSize = thearch.LinkArch.RegSize
 	types.MaxWidth = thearch.MAXWIDTH
 
 	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
+		return reflectdata.TypeSym(t).Linksym()
 	}
 
 	typecheck.Target = new(ir.Package)
 
 	typecheck.NeedFuncSym = staticdata.NeedFuncSym
-	typecheck.NeedITab = func(t, iface *types.Type) { itabname(t, iface) }
-	typecheck.NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock?
+	typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
+	typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock?
 
 	base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
 
 	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
+		return reflectdata.TypeSym(t).Linksym()
 	}
 	typecheck.Init()
@@ -282,7 +283,7 @@ func Main(archInit func(*Arch)) {
 	// the right side of OCONVIFACE so that methods
 	// can be de-virtualized during compilation.
 	ir.CurFunc = nil
-	peekitabs()
+	reflectdata.CompileITabs()
 
 	// Compile top level functions.
 	// Don't use range--walk can add functions to Target.Decls.

src/cmd/compile/internal/gc/obj.go

@@ -8,6 +8,7 @@ import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -112,14 +113,14 @@ func dumpdata() {
 	dumpglobls(typecheck.Target.Externs)
 	staticdata.WriteFuncSyms()
-	addptabs()
+	reflectdata.CollectPTabs()
 	numExports := len(typecheck.Target.Exports)
 	addsignats(typecheck.Target.Externs)
-	dumpsignats()
-	dumptabs()
-	numPTabs, numITabs := CountTabs()
-	dumpimportstrings()
-	dumpbasictypes()
+	reflectdata.WriteRuntimeTypes()
+	reflectdata.WriteTabs()
+	numPTabs, numITabs := reflectdata.CountTabs()
+	reflectdata.WriteImportStrings()
+	reflectdata.WriteBasicTypes()
 	dumpembeds()
 
 	// Calls to dumpsignats can generate functions,
@@ -138,7 +139,7 @@ func dumpdata() {
 		}
 		numDecls = len(typecheck.Target.Decls)
 		compileFunctions()
-		dumpsignats()
+		reflectdata.WriteRuntimeTypes()
 		if numDecls == len(typecheck.Target.Decls) {
 			break
 		}
@@ -147,9 +148,9 @@ func dumpdata() {
 	// Dump extra globals.
 	dumpglobls(typecheck.Target.Externs[numExterns:])
 
-	if zerosize > 0 {
+	if reflectdata.ZeroSize > 0 {
 		zero := ir.Pkgs.Map.Lookup("zero")
-		objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+		objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
 	}
 
 	addGCLocals()
@@ -157,7 +158,7 @@ func dumpdata() {
 	if numExports != len(typecheck.Target.Exports) {
 		base.Fatalf("Target.Exports changed after compile functions loop")
 	}
-	newNumPTabs, newNumITabs := CountTabs()
+	newNumPTabs, newNumITabs := reflectdata.CountTabs()
 	if newNumPTabs != numPTabs {
 		base.Fatalf("ptabs changed after compile functions loop")
 	}
@@ -184,36 +185,6 @@ func dumpLinkerObj(bout *bio.Writer) {
 	obj.WriteObjFile(base.Ctxt, bout)
 }
 
-func addptabs() {
-	if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
-		return
-	}
-	for _, exportn := range typecheck.Target.Exports {
-		s := exportn.Sym()
-		nn := ir.AsNode(s.Def)
-		if nn == nil {
-			continue
-		}
-		if nn.Op() != ir.ONAME {
-			continue
-		}
-		n := nn.(*ir.Name)
-		if !types.IsExported(s.Name) {
-			continue
-		}
-		if s.Pkg.Name != "main" {
-			continue
-		}
-		if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC {
-			// function
-			ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
-		} else {
-			// variable
-			ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
-		}
-	}
-}
-
 func dumpGlobal(n *ir.Name) {
 	if n.Type() == nil {
 		base.Fatalf("external %v nil type\n", n)
@@ -373,3 +344,12 @@ func dumpembeds() {
 		staticdata.WriteEmbed(v)
 	}
 }
+
+func addsignats(dcls []ir.Node) {
+	// copy types from dcl list to signatset
+	for _, n := range dcls {
+		if n.Op() == ir.OTYPE {
+			reflectdata.NeedRuntimeType(n.Type())
+		}
+	}
+}
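
A note on the loop kept intact in dumpdata: writing runtime type
descriptors can itself generate new functions (equality and hash
helpers), which may in turn need more descriptors, so compilation and
type emission iterate to a fixed point. The shape of that loop,
assembled from the context lines above:

	for {
		numDecls = len(typecheck.Target.Decls)
		compileFunctions()
		reflectdata.WriteRuntimeTypes() // may append to Target.Decls
		if numDecls == len(typecheck.Target.Decls) {
			break
		}
	}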

src/cmd/compile/internal/gc/order.go

@@ -8,6 +8,7 @@ import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/escape"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
@@ -882,7 +883,7 @@ func (o *Order) stmt(n ir.Node) {
 
 		// n.Prealloc is the temp for the iterator.
 		// hiter contains pointers and needs to be zeroed.
-		n.Prealloc = o.newTemp(hiter(n.Type()), true)
+		n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true)
 	}
 	o.exprListInPlace(n.Vars)
 	if orderBody {

src/cmd/compile/internal/gc/pgen.go

@@ -9,6 +9,7 @@ import (
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/liveness"
 	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -225,7 +226,7 @@ func compile(fn *ir.Func) {
 		switch n.Class_ {
 		case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
 			if liveness.ShouldTrack(n) && n.Addrtaken() {
-				dtypesym(n.Type())
+				reflectdata.WriteType(n.Type())
 				// Also make sure we allocate a linker symbol
 				// for the stack object data, for the same reason.
 				if fn.LSym.Func().StackObjects == nil {

src/cmd/compile/internal/gc/range.go

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/sys"
@@ -180,7 +181,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
 		fn := typecheck.LookupRuntime("mapiterinit")
 
 		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
-		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, typecheck.NodAddr(hit)))
+		init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
 		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
 
 		fn = typecheck.LookupRuntime("mapiternext")
@@ -383,7 +384,7 @@ func mapClear(m ir.Node) ir.Node {
 	// instantiate mapclear(typ *type, hmap map[any]any)
 	fn := typecheck.LookupRuntime("mapclear")
 	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
-	n := mkcall1(fn, nil, nil, typename(t), m)
+	n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
 	return walkstmt(typecheck.Stmt(n))
 }

src/cmd/compile/internal/gc/sinit.go

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -314,9 +315,9 @@ func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *type
 
 		var itab *ir.AddrExpr
 		if typ.IsEmptyInterface() {
-			itab = typename(val.Type())
+			itab = reflectdata.TypePtr(val.Type())
 		} else {
-			itab = itabname(val.Type(), typ)
+			itab = reflectdata.ITabAddr(val.Type(), typ)
 		}
 
 		// Create a copy of l to modify while we emit data.

src/cmd/compile/internal/gc/ssa.go

@@ -20,6 +20,7 @@ import (
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/liveness"
 	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
@@ -89,7 +90,7 @@ func initssaconfig() {
 	_ = types.NewPtr(types.Types[types.TINT64]) // *int64
 	_ = types.NewPtr(types.ErrorType)           // *error
 	types.NewPtrCacheEnabled = false
-	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
+	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
 	ssaConfig.SoftFloat = thearch.SoftFloat
 	ssaConfig.Race = base.Flag.Race
 	ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
@@ -134,7 +135,7 @@ func initssaconfig() {
 	ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
 
 	// asm funcs with special ABI
-	if thearch.LinkArch.Name == "amd64" {
+	if base.Ctxt.Arch.Name == "amd64" {
 		GCWriteBarrierReg = map[int16]*obj.LSym{
 			x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
 			x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
@@ -389,7 +390,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
 
 	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
 	switch {
-	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
 		// Don't support open-coded defers for 386 ONLY when using shared
 		// libraries, because there is extra code (added by rewriteToUseGot())
 		// preceding the deferreturn/ret code that is generated by gencallret()
@@ -6427,7 +6428,7 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) {
 		if !types.TypeSym(v.Type()).Siggen() {
 			e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
 		}
-		off = objw.SymPtr(x, off, dtypesym(v.Type()), 0)
+		off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0)
 	}
 
 	// Emit a funcdata pointing at the stack object data.
@@ -7247,7 +7248,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
 }
 
 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
-	return itabsym(it, offset)
+	return reflectdata.ITabSym(it, offset)
 }
 
 // SplitSlot returns a slot representing the data of parent starting at offset.
@@ -7411,3 +7412,44 @@ func max8(a, b int8) int8 {
 	}
 	return b
 }
+
+// deferstruct makes a runtime._defer structure, with additional space for
+// stksize bytes of args.
+func deferstruct(stksize int64) *types.Type {
+	makefield := func(name string, typ *types.Type) *types.Field {
+		// Unlike the global makefield function, this one needs to set Pkg
+		// because these types might be compared (in SSA CSE sorting).
+		// TODO: unify this makefield and the global one above.
+		sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+		return types.NewField(src.NoXPos, sym, typ)
+	}
+	argtype := types.NewArray(types.Types[types.TUINT8], stksize)
+	argtype.Width = stksize
+	argtype.Align = 1
+	// These fields must match the ones in runtime/runtime2.go:_defer and
+	// cmd/compile/internal/gc/ssa.go:(*state).call.
+	fields := []*types.Field{
+		makefield("siz", types.Types[types.TUINT32]),
+		makefield("started", types.Types[types.TBOOL]),
+		makefield("heap", types.Types[types.TBOOL]),
+		makefield("openDefer", types.Types[types.TBOOL]),
+		makefield("sp", types.Types[types.TUINTPTR]),
+		makefield("pc", types.Types[types.TUINTPTR]),
+		// Note: the types here don't really matter. Defer structures
+		// are always scanned explicitly during stack copying and GC,
+		// so we make them uintptr type even though they are real pointers.
+		makefield("fn", types.Types[types.TUINTPTR]),
+		makefield("_panic", types.Types[types.TUINTPTR]),
+		makefield("link", types.Types[types.TUINTPTR]),
+		makefield("framepc", types.Types[types.TUINTPTR]),
+		makefield("varp", types.Types[types.TUINTPTR]),
+		makefield("fd", types.Types[types.TUINTPTR]),
+		makefield("args", argtype),
+	}
+
+	// build struct holding the above fields
+	s := types.NewStruct(types.NoPkg, fields)
+	s.SetNoalg(true)
+	types.CalcStructSize(s)
+	return s
+}

src/cmd/compile/internal/gc/subr.go

@@ -6,9 +6,8 @@ package gc
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/escape"
-	"cmd/compile/internal/inline"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
@@ -319,144 +318,9 @@ func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
 	return copyexpr(n, n.Type(), init)
 }
 
-// Generate a wrapper function to convert from
-// a receiver of type T to a receiver of type U.
-// That is,
-//
-//	func (t T) M() {
-//		...
-//	}
-//
-// already exists; this function generates
-//
-//	func (u U) M() {
-//		u.M()
-//	}
-//
-// where the types T and U are such that u.M() is valid
-// and calls the T.M method.
-// The resulting function is for use in method tables.
-//
-// rcvr - U
-// method - M func (t T)(), a TFIELD type struct
-// newnam - the eventual mangled name of this function
-func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
-	if false && base.Flag.LowerR != 0 {
-		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
-	}
-
-	// Only generate (*T).M wrappers for T.M in T's own package.
-	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
-		rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
-		return
-	}
-
-	// Only generate I.M wrappers for I in I's own package
-	// but keep doing it for error.Error (was issue #29304).
-	if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
-		return
-	}
-
-	base.Pos = base.AutogeneratedPos
-	typecheck.DeclContext = ir.PEXTERN
-
-	tfn := ir.NewFuncType(base.Pos,
-		ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
-		typecheck.NewFuncParams(method.Type.Params(), true),
-		typecheck.NewFuncParams(method.Type.Results(), false))
-
-	fn := typecheck.DeclFunc(newnam, tfn)
-	fn.SetDupok(true)
-
-	nthis := ir.AsNode(tfn.Type().Recv().Nname)
-
-	methodrcvr := method.Type.Recv().Type
-
-	// generate nil pointer check for better error
-	if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
-		// generating wrapper from *T to T.
-		n := ir.NewIfStmt(base.Pos, nil, nil, nil)
-		n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
-		call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
-		n.Body = []ir.Node{call}
-		fn.Body.Append(n)
-	}
-
-	dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
-
-	// generate call
-	// It's not possible to use a tail call when dynamic linking on ppc64le. The
-	// bad scenario is when a local call is made to the wrapper: the wrapper will
-	// call the implementation, which might be in a different module and so set
-	// the TOC to the appropriate value for that module. But if it returns
-	// directly to the wrapper's caller, nothing will reset it to the correct
-	// value for that function.
-	if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
-		// generate tail call: adjust pointer receiver and jump to embedded method.
-		left := dot.X // skip final .M
-		if !left.Type().IsPtr() {
-			left = typecheck.NodAddr(left)
-		}
-		as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
-		fn.Body.Append(as)
-		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
-	} else {
-		fn.SetWrapper(true) // ignore frame for panic+recover matching
-		call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
-		call.Args.Set(ir.ParamNames(tfn.Type()))
-		call.IsDDD = tfn.Type().IsVariadic()
-		if method.Type.NumResults() > 0 {
-			ret := ir.NewReturnStmt(base.Pos, nil)
-			ret.Results = []ir.Node{call}
-			fn.Body.Append(ret)
-		} else {
-			fn.Body.Append(call)
-		}
-	}
-
-	if false && base.Flag.LowerR != 0 {
-		ir.DumpList("genwrapper body", fn.Body)
-	}
-
-	typecheck.FinishFuncBody()
-	if base.Debug.DclStack != 0 {
-		types.CheckDclstack()
-	}
-
-	typecheck.Func(fn)
-	ir.CurFunc = fn
-	typecheck.Stmts(fn.Body)
-
-	// Inline calls within (*T).M wrappers. This is safe because we only
-	// generate those wrappers within the same compilation unit as (T).M.
-	// TODO(mdempsky): Investigate why we can't enable this more generally.
-	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
-		inline.InlineCalls(fn)
-	}
-
-	escape.Batch([]*ir.Func{fn}, false)
-
-	ir.CurFunc = nil
-	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-}
-
-func hashmem(t *types.Type) ir.Node {
-	sym := ir.Pkgs.Runtime.Lookup("memhash")
-
-	n := typecheck.NewName(sym)
-	ir.MarkFunc(n)
-	n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-	}, []*ir.Field{
-		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
-	}))
-	return n
-}
-
 func ngotype(n ir.Node) *types.Sym {
 	if n.Type() != nil {
-		return typenamesym(n.Type())
+		return reflectdata.TypeSym(n.Type())
 	}
 	return nil
 }

src/cmd/compile/internal/gc/walk.go

@@ -8,6 +8,7 @@ import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/escape"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/staticdata"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -594,12 +595,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		n := n.(*ir.TypeAssertExpr)
 		n.X = walkexpr(n.X, init)
 		// Set up interface type addresses for back end.
-		n.Ntype = typename(n.Type())
+		n.Ntype = reflectdata.TypePtr(n.Type())
 		if n.Op() == ir.ODOTTYPE {
-			n.Ntype.(*ir.AddrExpr).Alloc = typename(n.X.Type())
+			n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type())
 		}
 		if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
-			n.Itab = []ir.Node{itabname(n.Type(), n.X.Type())}
+			n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())}
 		}
 		return n
@@ -781,7 +782,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			// Left in place for back end.
 			// Do not add a new write barrier.
 			// Set up address of type for back end.
-			r.(*ir.CallExpr).X = typename(r.Type().Elem())
+			r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
 			return as
 		}
 		// Otherwise, lowered for race detector.
@@ -870,11 +871,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		var call *ir.CallExpr
 		if w := t.Elem().Width; w <= zeroValSize {
 			fn := mapfn(mapaccess2[fast], t)
-			call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key)
+			call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
 		} else {
 			fn := mapfn("mapaccess2_fat", t)
-			z := zeroaddr(w)
-			call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key, z)
+			z := reflectdata.ZeroAddr(w)
+			call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
 		}
 
 		// mapaccess2* returns a typed bool, but due to spec changes,
@@ -915,7 +916,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			// order.stmt made sure key is addressable.
 			key = typecheck.NodAddr(key)
 		}
-		return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
+		return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
 
 	case ir.OAS2DOTTYPE:
 		n := n.(*ir.AssignListStmt)
@@ -937,9 +938,9 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		// typeword generates the type word of the interface value.
 		typeword := func() ir.Node {
 			if toType.IsEmptyInterface() {
-				return typename(fromType)
+				return reflectdata.TypePtr(fromType)
 			}
-			return itabname(fromType, toType)
+			return reflectdata.ITabAddr(fromType, toType)
 		}
 
 		// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
@@ -1048,7 +1049,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		var tab ir.Node
 		if fromType.IsInterface() {
 			// convI2I
-			tab = typename(toType)
+			tab = reflectdata.TypePtr(toType)
 		} else {
 			// convT2x
 			tab = typeword()
@@ -1218,7 +1219,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 				// order.expr made sure key is addressable.
 				key = typecheck.NodAddr(key)
 			}
-			call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
+			call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
 		} else {
 			// m[k] is not the target of an assignment.
 			fast := mapfast(t)
@@ -1229,10 +1230,10 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			}
 
 			if w := t.Elem().Width; w <= zeroValSize {
-				call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
+				call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
 			} else {
-				z := zeroaddr(w)
-				call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
+				z := reflectdata.ZeroAddr(w)
+				call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
 			}
 		}
 		call.SetType(types.NewPtr(t.Elem()))
@@ -1340,12 +1341,12 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			argtype = types.Types[types.TINT]
 		}
 
-		return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), typecheck.Conv(size, argtype))
+		return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
 
 	case ir.OMAKEMAP:
 		n := n.(*ir.MakeExpr)
 		t := n.Type()
-		hmapType := hmap(t)
+		hmapType := reflectdata.MapType(t)
 		hint := n.Len
 
 		// var h *hmap
@@ -1365,7 +1366,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			// Maximum key and elem size is 128 bytes, larger objects
 			// are stored with an indirection. So max bucket size is 2048+eps.
 			if !ir.IsConst(hint, constant.Int) ||
-				constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
+				constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
 
 				// In case hint is larger than BUCKETSIZE runtime.makemap
 				// will allocate the buckets on the heap, see #20184
@@ -1376,11 +1377,11 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 				//     h.buckets = b
 				// }
 
-				nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(BUCKETSIZE)), nil, nil)
+				nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
 				nif.Likely = true
 
 				// var bv bmap
-				bv := typecheck.Temp(bmap(t))
+				bv := typecheck.Temp(reflectdata.MapBucketType(t))
 				nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
 
 				// b = &bv
@@ -1394,7 +1395,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 			}
 		}
 
-		if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
+		if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
 			// Handling make(map[any]any) and
 			// make(map[any]any, hint) where hint <= BUCKETSIZE
 			// special allows for faster map initialization and
@@ -1442,7 +1443,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		fn := typecheck.LookupRuntime(fnname)
 		fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
-		return mkcall1(fn, n.Type(), init, typename(n.Type()), typecheck.Conv(hint, argtype), h)
+		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
 
 	case ir.OMAKESLICE:
 		n := n.(*ir.MakeExpr)
@@ -1511,7 +1512,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		m.SetType(t)
 
 		fn := typecheck.LookupRuntime(fnname)
-		m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+		m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
 		m.Ptr.MarkNonNil()
 		m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])}
 		return walkexpr(typecheck.Expr(m), init)
@@ -1565,7 +1566,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 		// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
 		fn := typecheck.LookupRuntime("makeslicecopy")
 		s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
-		s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+		s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
 		s.Ptr.MarkNonNil()
 		s.LenCap = []ir.Node{length, length}
 		s.SetType(t)
@@ -1709,7 +1710,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
 // markTypeUsedInInterface marks that type t is converted to an interface.
 // This information is used in the linker in dead method elimination.
 func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
-	tsym := typenamesym(t).Linksym()
+	tsym := reflectdata.TypeSym(t).Linksym()
 	// Emit a marker relocation. The linker will know the type is converted
 	// to an interface if "from" is reachable.
 	r := obj.Addrel(from)
@@ -1722,13 +1723,13 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
 func markUsedIfaceMethod(n *ir.CallExpr) {
 	dot := n.X.(*ir.SelectorExpr)
 	ityp := dot.X.Type()
-	tsym := typenamesym(ityp).Linksym()
+	tsym := reflectdata.TypeSym(ityp).Linksym()
 	r := obj.Addrel(ir.CurFunc.LSym)
 	r.Sym = tsym
 	// dot.Xoffset is the method index * Widthptr (the offset of code pointer
 	// in itab).
 	midx := dot.Offset / int64(types.PtrSize)
-	r.Add = ifaceMethodOffset(ityp, midx)
+	r.Add = reflectdata.InterfaceMethodOffset(ityp, midx)
 	r.Type = objabi.R_USEIFACEMETHOD
 }
@@ -2095,7 +2096,7 @@ func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
 
 func callnew(t *types.Type) ir.Node {
 	types.CalcSize(t)
-	n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, typename(t))
+	n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
 	n.SetType(types.NewPtr(t))
 	n.SetTypecheck(1)
 	n.MarkNonNil()
@@ -2589,7 +2590,7 @@ func mapfast(t *types.Type) int {
 	if t.Elem().Width > 128 {
 		return mapslow
 	}
-	switch algtype(t.Key()) {
+	switch reflectdata.AlgType(t.Key()) {
 	case types.AMEM32:
 		if !t.Key().HasPointers() {
 			return mapfast32
@@ -2733,7 +2734,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 		fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
 
 		// s = growslice(T, s, n)
-		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))}
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
 		nodes.Append(nif)
 
 		// s = s[:n]
@@ -2756,7 +2757,7 @@ func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 		fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
 		ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
 		ptr2, len2 := backingArrayPtrLen(l2)
-		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
 	} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
 		// rely on runtime to instrument:
 		//  copy(s[len(l1):], l2)
@@ -2903,7 +2904,7 @@ func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
 
 	// s = growslice(T, s, n)
-	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))}
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
 	nodes = append(nodes, nif)
 
 	// s = s[:n]
@@ -3025,7 +3026,7 @@ func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
 		fn := typecheck.LookupRuntime("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
 		fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
-		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
 			ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
 
 		l = append(l, nif)
@@ -3073,7 +3074,7 @@ func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
 		ptrL, lenL := backingArrayPtrLen(n.X)
 		n.Y = cheapexpr(n.Y, init)
 		ptrR, lenR := backingArrayPtrLen(n.Y)
-		return mkcall1(fn, n.Type(), init, typename(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
+		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
 	}
 
 	if runtimecall {
@@ -3146,7 +3147,7 @@ func eqfor(t *types.Type) (n ir.Node, needsize bool) {
 		n = typecheck.SubstArgTypes(n, t, t)
 		return n, true
 	case types.ASPECIAL:
-		sym := typesymprefix(".eq", t)
+		sym := reflectdata.TypeSymPrefix(".eq", t)
 		n := typecheck.NewName(sym)
 		ir.MarkFunc(n)
 		n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
@@ -3200,7 +3201,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
 		//   l.tab != nil && l.tab._type == type(r)
 		var eqtype ir.Node
 		tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
-		rtyp := typename(r.Type())
+		rtyp := reflectdata.TypePtr(r.Type())
 		if l.Type().IsEmptyInterface() {
 			tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
 			tab.SetTypecheck(1)
@@ -3424,7 +3425,7 @@ func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
 func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
 	n.Y = cheapexpr(n.Y, init)
 	n.X = cheapexpr(n.X, init)
-	eqtab, eqdata := eqinterface(n.X, n.Y)
+	eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
 	var cmp ir.Node
 	if n.Op() == ir.OEQ {
 		cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
@@ -3538,7 +3539,7 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
 	// prepare for rewrite below
 	n.X = cheapexpr(n.X, init)
 	n.Y = cheapexpr(n.Y, init)
-	eqlen, eqmem := eqstring(n.X, n.Y)
+	eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
 	// quick check of len before full compare for == or !=.
 	// memequal then tests equality up to length len.
 	if n.Op() == ir.OEQ {
@@ -3728,7 +3729,7 @@ func usefield(n *ir.SelectorExpr) {
 		base.Errorf("tracked field must be exported (upper case)")
 	}
 
-	sym := tracksym(outer, field)
+	sym := reflectdata.TrackSym(outer, field)
 	if ir.CurFunc.FieldTrack == nil {
 		ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{})
 	}
@@ -3946,7 +3947,7 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod
 	}
 
 	n.X = cheapexpr(n.X, init)
-	init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
+	init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
 	return n
 }
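
The OMAKEMAP case above keeps gc's existing small-map optimization, now
phrased in terms of the reflectdata API: when the hint is not provably
at most reflectdata.BUCKETSIZE (the per-bucket entry count) at compile
time, the generated code tests it at run time and, if small, wires a
stack-allocated bucket into the map header. A sketch assembled from the
comments in that hunk (h, b, bv are the temporaries named there):

	// var h *hmap; var b *bmap
	if hint <= BUCKETSIZE {
		var bv bmap // typecheck.Temp(reflectdata.MapBucketType(t))
		b = &bv
		h.buckets = b
	}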

src/cmd/compile/internal/gc/alg.go → src/cmd/compile/internal/reflectdata/alg.go

@@ -2,38 +2,39 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package reflectdata
 
 import (
+	"fmt"
+	"sort"
+
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
-	"fmt"
-	"sort"
 )
 
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func IsRegularMemory(t *types.Type) bool {
+// isRegularMemory reports whether t can be compared/hashed as regular memory.
+func isRegularMemory(t *types.Type) bool {
 	a, _ := types.AlgType(t)
 	return a == types.AMEM
 }
 
-// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
 // t must be comparable.
-func EqCanPanic(t *types.Type) bool {
+func eqCanPanic(t *types.Type) bool {
 	switch t.Kind() {
 	default:
 		return false
 	case types.TINTER:
 		return true
 	case types.TARRAY:
-		return EqCanPanic(t.Elem())
+		return eqCanPanic(t.Elem())
 	case types.TSTRUCT:
 		for _, f := range t.FieldSlice() {
-			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
+			if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
 				return true
 			}
 		}
@@ -41,9 +42,9 @@ func EqCanPanic(t *types.Type) bool {
 	}
 }
 
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
+// AlgType is like algtype1, except it returns the fixed-width AMEMxx variants
 // instead of the general AMEM kind when possible.
-func algtype(t *types.Type) types.AlgKind {
+func AlgType(t *types.Type) types.AlgKind {
 	a, _ := types.AlgType(t)
 	if a == types.AMEM {
 		switch t.Width {
@@ -69,7 +70,7 @@ func algtype(t *types.Type) types.AlgKind {
 // the hash of a value of type t.
 // Note: the generated function must match runtime.typehash exactly.
 func genhash(t *types.Type) *obj.LSym {
-	switch algtype(t) {
+	switch AlgType(t) {
 	default:
 		// genhash is only called for types that have equality
 		base.Fatalf("genhash %v", t)
@@ -119,7 +120,7 @@ func genhash(t *types.Type) *obj.LSym {
 		break
 	}
 
-	closure := typesymprefix(".hashfunc", t).Linksym()
+	closure := TypeSymPrefix(".hashfunc", t).Linksym()
 	if len(closure.P) > 0 { // already generated
 		return closure
 	}
@@ -139,7 +140,7 @@ func genhash(t *types.Type) *obj.LSym {
 		}
 	}
 
-	sym := typesymprefix(".hash", t)
+	sym := TypeSymPrefix(".hash", t)
 	if base.Flag.LowerR != 0 {
 		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
 	}
@@ -199,7 +200,7 @@ func genhash(t *types.Type) *obj.LSym {
 			}
 
 			// Hash non-memory fields with appropriate hash function.
-			if !IsRegularMemory(f.Type) {
+			if !isRegularMemory(f.Type) {
 				hashel := hashfor(f.Type)
 				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
 				nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
@@ -283,7 +284,7 @@ func hashfor(t *types.Type) ir.Node {
 	default:
 		// Note: the caller of hashfor ensured that this symbol
 		// exists and has a body by calling genhash for t.
-		sym = typesymprefix(".hash", t)
+		sym = TypeSymPrefix(".hash", t)
 	}
 
 	n := typecheck.NewName(sym)
@@ -312,7 +313,7 @@ func sysClosure(name string) *obj.LSym {
 // geneq returns a symbol which is the closure used to compute
 // equality for two objects of type t.
 func geneq(t *types.Type) *obj.LSym {
-	switch algtype(t) {
+	switch AlgType(t) {
 	case types.ANOEQ:
 		// The runtime will panic if it tries to compare
 		// a type with a nil equality function.
@@ -362,11 +363,11 @@ func geneq(t *types.Type) *obj.LSym {
 		break
 	}
 
-	closure := typesymprefix(".eqfunc", t).Linksym()
+	closure := TypeSymPrefix(".eqfunc", t).Linksym()
 	if len(closure.P) > 0 { // already generated
 		return closure
 	}
-	sym := typesymprefix(".eq", t)
+	sym := TypeSymPrefix(".eq", t)
 	if base.Flag.LowerR != 0 {
 		fmt.Printf("geneq %v\n", t)
 	}
@@ -476,12 +477,12 @@ func geneq(t *types.Type) *obj.LSym {
 			// TODO: when the array size is small, unroll the length match checks.
 			checkAll(3, false, func(pi, qi ir.Node) ir.Node {
 				// Compare lengths.
-				eqlen, _ := eqstring(pi, qi)
+				eqlen, _ := EqString(pi, qi)
 				return eqlen
 			})
 			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
 				// Compare contents.
-				_, eqmem := eqstring(pi, qi)
+				_, eqmem := EqString(pi, qi)
 				return eqmem
 			})
 		case types.TFLOAT32, types.TFLOAT64:
@@ -520,8 +521,8 @@ func geneq(t *types.Type) *obj.LSym {
 			}
 
 			// Compare non-memory fields with field equality.
-			if !IsRegularMemory(f.Type) {
-				if EqCanPanic(f.Type) {
+			if !isRegularMemory(f.Type) {
+				if eqCanPanic(f.Type) {
 					// Enforce ordering by starting a new set of reorderable conditions.
 					conds = append(conds, []ir.Node{})
 				}
@@ -529,13 +530,13 @@ func geneq(t *types.Type) *obj.LSym {
 				q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
 				switch {
 				case f.Type.IsString():
-					eqlen, eqmem := eqstring(p, q)
+					eqlen, eqmem := EqString(p, q)
 					and(eqlen)
 					and(eqmem)
 				default:
 					and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
 				}
-				if EqCanPanic(f.Type) {
+				if eqCanPanic(f.Type) {
 					// Also enforce ordering after something that can panic.
 					conds = append(conds, []ir.Node{})
 				}
@@ -597,7 +598,7 @@ func geneq(t *types.Type) *obj.LSym {
 	// return (or goto ret)
 	fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
 	fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
-	if EqCanPanic(t) || anyCall(fn) {
+	if eqCanPanic(t) || anyCall(fn) {
 		// Epilogue is large, so share it with the equal case.
 		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
 	} else {
@@ -655,13 +656,13 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
 	return ne
 }
 
-// eqstring returns the nodes
+// EqString returns the nodes
 //   len(s) == len(t)
 // and
 //   memequal(s.ptr, t.ptr, len(s))
 // which can be used to construct string equality comparison.
 // eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
 	s = typecheck.Conv(s, types.Types[types.TSTRING])
 	t = typecheck.Conv(t, types.Types[types.TSTRING])
 	sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
@@ -680,13 +681,13 @@ func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
 	return cmp, call
 }
 
-// eqinterface returns the nodes
+// EqInterface returns the nodes
 //   s.tab == t.tab (or s.typ == t.typ, as appropriate)
 // and
 //   ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
 // which can be used to construct interface equality comparison.
 // eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
 	if !types.Identical(s.Type(), t.Type()) {
 		base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
 	}
@@ -764,9 +765,24 @@ func memrun(t *types.Type, start int) (size int64, next int) {
 			break
 		}
 		// Also, stop before a blank or non-memory field.
-		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
+		if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
 			break
 		}
 	}
 	return t.Field(next-1).End() - t.Field(start).Offset, next
 }
+
+func hashmem(t *types.Type) ir.Node {
+	sym := ir.Pkgs.Runtime.Lookup("memhash")
+
+	n := typecheck.NewName(sym)
+	ir.MarkFunc(n)
+	n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+		ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+	}, []*ir.Field{
+		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+	}))
+	return n
+}
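
EqString and EqInterface return the two halves of a comparison as
separate nodes because the caller must wire up the short-circuiting
itself (walkcompareString and walkcompareInterface in walk.go do
exactly that). In plain Go terms, the nodes built for s == t on strings
compute roughly the following, where memequal stands for the runtime's
raw memory comparison; a sketch, not compiler output:

	len(s) == len(t) && memequal(sptr, tptr, uintptr(len(s)))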

src/cmd/compile/internal/gc/reflect.go → src/cmd/compile/internal/reflectdata/reflect.go

@ -2,11 +2,19 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package gc package reflectdata
import ( import (
"fmt"
"os"
"sort"
"strings"
"sync"
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/bitvec" "cmd/compile/internal/bitvec"
"cmd/compile/internal/escape"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
"cmd/compile/internal/liveness" "cmd/compile/internal/liveness"
"cmd/compile/internal/objw" "cmd/compile/internal/objw"
@ -16,11 +24,6 @@ import (
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/objabi" "cmd/internal/objabi"
"cmd/internal/src" "cmd/internal/src"
"fmt"
"os"
"sort"
"strings"
"sync"
) )
type itabEntry struct { type itabEntry struct {
@ -52,7 +55,7 @@ var (
ptabs []ptabEntry ptabs []ptabEntry
) )
type Sig struct { type typeSig struct {
name *types.Sym name *types.Sym
isym *types.Sym isym *types.Sym
tsym *types.Sym tsym *types.Sym
@ -87,8 +90,8 @@ func makefield(name string, t *types.Type) *types.Field {
return types.NewField(src.NoXPos, sym, t) return types.NewField(src.NoXPos, sym, t)
} }
// bmap makes the map bucket type given the type of the map. // MapBucketType makes the map bucket type given the type of the map.
func bmap(t *types.Type) *types.Type { func MapBucketType(t *types.Type) *types.Type {
if t.MapType().Bucket != nil { if t.MapType().Bucket != nil {
return t.MapType().Bucket return t.MapType().Bucket
} }
@ -194,14 +197,14 @@ func bmap(t *types.Type) *types.Type {
return bucket return bucket
} }
// hmap builds a type representing a Hmap structure for the given map type. // MapType builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go. // Make sure this stays in sync with runtime/map.go.
func hmap(t *types.Type) *types.Type { func MapType(t *types.Type) *types.Type {
if t.MapType().Hmap != nil { if t.MapType().Hmap != nil {
return t.MapType().Hmap return t.MapType().Hmap
} }
bmap := bmap(t) bmap := MapBucketType(t)
// build a struct: // build a struct:
// type hmap struct { // type hmap struct {
@ -243,15 +246,15 @@ func hmap(t *types.Type) *types.Type {
return hmap return hmap
} }
// hiter builds a type representing an Hiter structure for the given map type. // MapIterType builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go. // Make sure this stays in sync with runtime/map.go.
func hiter(t *types.Type) *types.Type { func MapIterType(t *types.Type) *types.Type {
if t.MapType().Hiter != nil { if t.MapType().Hiter != nil {
return t.MapType().Hiter return t.MapType().Hiter
} }
hmap := hmap(t) hmap := MapType(t)
bmap := bmap(t) bmap := MapBucketType(t)
// build a struct: // build a struct:
// type hiter struct { // type hiter struct {
@ -302,50 +305,9 @@ func hiter(t *types.Type) *types.Type {
return hiter return hiter
} }
// deferstruct makes a runtime._defer structure, with additional space for
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
argtype := types.NewArray(types.Types[types.TUINT8], stksize)
argtype.Width = stksize
argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
// cmd/compile/internal/gc/ssa.go:(*state).call.
fields := []*types.Field{
makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
makefield("sp", types.Types[types.TUINTPTR]),
makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
makefield("args", argtype),
}
// build struct holding the above fields
s := types.NewStruct(types.NoPkg, fields)
s.SetNoalg(true)
types.CalcStructSize(s)
return s
}
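
Written as an ordinary struct, the record deferstruct builds looks like the sketch below. The field list is taken directly from the code above; the pointer-valued fields are deliberately uintptr because, as the comment notes, defer structures are scanned explicitly rather than by type-driven GC:

package sketch

// deferRecord mirrors the fields assembled by deferstruct; the args
// array, sized by stksize, follows the fixed fields.
type deferRecord struct {
	siz       uint32
	started   bool
	heap      bool
	openDefer bool
	sp        uintptr
	pc        uintptr
	fn        uintptr // real pointer, scanned explicitly
	_panic    uintptr // real pointer, scanned explicitly
	link      uintptr // real pointer, scanned explicitly
	framepc   uintptr
	varp      uintptr
	fd        uintptr
	// args [stksize]byte would follow here
}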
// methods returns the methods of the non-interface type t, sorted by name. // methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed. // Generates stub functions as needed.
func methods(t *types.Type) []*Sig { func methods(t *types.Type) []*typeSig {
// method type // method type
mt := types.ReceiverBaseType(t) mt := types.ReceiverBaseType(t)
@@ -363,7 +325,7 @@ func methods(t *types.Type) []*Sig {
// make list of methods for t, // make list of methods for t,
// generating code if necessary. // generating code if necessary.
var ms []*Sig var ms []*typeSig
for _, f := range mt.AllMethods().Slice() { for _, f := range mt.AllMethods().Slice() {
if !f.IsMethod() { if !f.IsMethod() {
base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
@@ -388,7 +350,7 @@ func methods(t *types.Type) []*Sig {
continue continue
} }
sig := &Sig{ sig := &typeSig{
name: method, name: method,
isym: ir.MethodSym(it, method), isym: ir.MethodSym(it, method),
tsym: ir.MethodSym(t, method), tsym: ir.MethodSym(t, method),
@@ -418,8 +380,8 @@ func methods(t *types.Type) []*Sig {
} }
// imethods returns the methods of the interface type t, sorted by name. // imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig { func imethods(t *types.Type) []*typeSig {
var methods []*Sig var methods []*typeSig
for _, f := range t.Fields().Slice() { for _, f := range t.Fields().Slice() {
if f.Type.Kind() != types.TFUNC || f.Sym == nil { if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue continue
@@ -434,7 +396,7 @@ func imethods(t *types.Type) []*Sig {
} }
} }
sig := &Sig{ sig := &typeSig{
name: f.Sym, name: f.Sym,
mtype: f.Type, mtype: f.Type,
type_: typecheck.NewMethodType(f.Type, nil), type_: typecheck.NewMethodType(f.Type, nil),
@@ -622,7 +584,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
} }
for _, a := range m { for _, a := range m {
dtypesym(a.type_) WriteType(a.type_)
} }
ot = dgopkgpathOff(lsym, ot, typePkg(t)) ot = dgopkgpathOff(lsym, ot, typePkg(t))
@@ -673,7 +635,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
nsym := dname(a.name.Name, "", pkg, exported) nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym) ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) ot = dmethodptrOff(lsym, ot, WriteType(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
} }
@@ -750,7 +712,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym() != nil || methods(tptr) != nil { if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false sptrWeak = false
} }
sptr = dtypesym(tptr) sptr = WriteType(tptr)
} }
gcsym, useGCProg, ptrdata := dgcsym(t) gcsym, useGCProg, ptrdata := dgcsym(t)
@@ -782,7 +744,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym() != nil && t.Sym().Name != "" { if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed tflag |= tflagNamed
} }
if IsRegularMemory(t) { if isRegularMemory(t) {
tflag |= tflagRegularMemory tflag |= tflagRegularMemory
} }
@@ -848,20 +810,20 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
return ot return ot
} }
// tracksym returns the symbol for tracking use of field/method f, assumed // TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t. // to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym { func TrackSym(t *types.Type, f *types.Field) *types.Sym {
return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name) return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name)
} }
func typesymprefix(prefix string, t *types.Type) *types.Sym { func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
p := prefix + "." + t.ShortString() p := prefix + "." + t.ShortString()
s := types.TypeSymLookup(p) s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions // This function is for looking up type-related generated functions
// (e.g. eq and hash). Make sure they are indeed generated. // (e.g. eq and hash). Make sure they are indeed generated.
signatmu.Lock() signatmu.Lock()
addsignat(t) NeedRuntimeType(t)
signatmu.Unlock() signatmu.Unlock()
//print("algsym: %s -> %+S\n", p, s); //print("algsym: %s -> %+S\n", p, s);
@@ -869,19 +831,19 @@ func typesymprefix(prefix string, t *types.Type) *types.Sym {
return s return s
} }
func typenamesym(t *types.Type) *types.Sym { func TypeSym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
base.Fatalf("typenamesym %v", t) base.Fatalf("typenamesym %v", t)
} }
s := types.TypeSym(t) s := types.TypeSym(t)
signatmu.Lock() signatmu.Lock()
addsignat(t) NeedRuntimeType(t)
signatmu.Unlock() signatmu.Unlock()
return s return s
} }
func typename(t *types.Type) *ir.AddrExpr { func TypePtr(t *types.Type) *ir.AddrExpr {
s := typenamesym(t) s := TypeSym(t)
if s.Def == nil { if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s) n := ir.NewNameAt(src.NoXPos, s)
n.SetType(types.Types[types.TUINT8]) n.SetType(types.Types[types.TUINT8])
@@ -896,7 +858,7 @@ func typename(t *types.Type) *ir.AddrExpr {
return n return n
} }
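
TypePtr hands back the address of a type's runtime descriptor as an expression node; it is the compile-time side of the identity that reflection observes at run time. A user-level analogy (demo code, not compiler code):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Identical types share one descriptor, so the reflect.Type
	// values (which wrap pointers to those descriptors) compare equal.
	t1 := reflect.TypeOf(map[string]int{})
	t2 := reflect.TypeOf(map[string]int{})
	fmt.Println(t1 == t2) // true
}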
func itabname(t, itype *types.Type) *ir.AddrExpr { func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype) base.Fatalf("itabname(%v, %v)", t, itype)
} }
@@ -978,7 +940,7 @@ func formalType(t *types.Type) *types.Type {
return t return t
} }
func dtypesym(t *types.Type) *obj.LSym { func WriteType(t *types.Type) *obj.LSym {
t = formalType(t) t = formalType(t)
if t.IsUntyped() { if t.IsUntyped() {
base.Fatalf("dtypesym %v", t) base.Fatalf("dtypesym %v", t)
@@ -1028,9 +990,9 @@ func dtypesym(t *types.Type) *obj.LSym {
case types.TARRAY: case types.TARRAY:
// ../../../../runtime/type.go:/arrayType // ../../../../runtime/type.go:/arrayType
s1 := dtypesym(t.Elem()) s1 := WriteType(t.Elem())
t2 := types.NewSlice(t.Elem()) t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2) s2 := WriteType(t2)
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.SymPtr(lsym, ot, s2, 0) ot = objw.SymPtr(lsym, ot, s2, 0)
@@ -1039,14 +1001,14 @@ func dtypesym(t *types.Type) *obj.LSym {
case types.TSLICE: case types.TSLICE:
// ../../../../runtime/type.go:/sliceType // ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem()) s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
case types.TCHAN: case types.TCHAN:
// ../../../../runtime/type.go:/chanType // ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem()) s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir())) ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
@@ -1054,15 +1016,15 @@ func dtypesym(t *types.Type) *obj.LSym {
case types.TFUNC: case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() { for _, t1 := range t.Recvs().Fields().Slice() {
dtypesym(t1.Type) WriteType(t1.Type)
} }
isddd := false isddd := false
for _, t1 := range t.Params().Fields().Slice() { for _, t1 := range t.Params().Fields().Slice() {
isddd = t1.IsDDD() isddd = t1.IsDDD()
dtypesym(t1.Type) WriteType(t1.Type)
} }
for _, t1 := range t.Results().Fields().Slice() { for _, t1 := range t.Results().Fields().Slice() {
dtypesym(t1.Type) WriteType(t1.Type)
} }
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
@@ -1082,20 +1044,20 @@ func dtypesym(t *types.Type) *obj.LSym {
// Array of rtype pointers follows funcType. // Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() { for _, t1 := range t.Recvs().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
} }
for _, t1 := range t.Params().Fields().Slice() { for _, t1 := range t.Params().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
} }
for _, t1 := range t.Results().Fields().Slice() { for _, t1 := range t.Results().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
} }
case types.TINTER: case types.TINTER:
m := imethods(t) m := imethods(t)
n := len(m) n := len(m)
for _, a := range m { for _, a := range m {
dtypesym(a.type_) WriteType(a.type_)
} }
// ../../../../runtime/type.go:/interfaceType // ../../../../runtime/type.go:/interfaceType
@@ -1123,14 +1085,14 @@ func dtypesym(t *types.Type) *obj.LSym {
nsym := dname(a.name.Name, "", pkg, exported) nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym) ot = objw.SymPtrOff(lsym, ot, nsym)
ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_)) ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_))
} }
// ../../../../runtime/type.go:/mapType // ../../../../runtime/type.go:/mapType
case types.TMAP: case types.TMAP:
s1 := dtypesym(t.Key()) s1 := WriteType(t.Key())
s2 := dtypesym(t.Elem()) s2 := WriteType(t.Elem())
s3 := dtypesym(bmap(t)) s3 := WriteType(MapBucketType(t))
hasher := genhash(t.Key()) hasher := genhash(t.Key())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
@@ -1154,7 +1116,7 @@ func dtypesym(t *types.Type) *obj.LSym {
} else { } else {
ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width)) ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
} }
ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width)) ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width))
if types.IsReflexive(t.Key()) { if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key flags |= 4 // reflexive key
} }
@@ -1177,7 +1139,7 @@ func dtypesym(t *types.Type) *obj.LSym {
} }
// ../../../../runtime/type.go:/ptrType // ../../../../runtime/type.go:/ptrType
s1 := dtypesym(t.Elem()) s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
@@ -1188,7 +1150,7 @@ func dtypesym(t *types.Type) *obj.LSym {
case types.TSTRUCT: case types.TSTRUCT:
fields := t.Fields().Slice() fields := t.Fields().Slice()
for _, t1 := range fields { for _, t1 := range fields {
dtypesym(t1.Type) WriteType(t1.Type)
} }
// All non-exported struct field names within a struct // All non-exported struct field names within a struct
@@ -1216,7 +1178,7 @@ func dtypesym(t *types.Type) *obj.LSym {
for _, f := range fields { for _, f := range fields {
// ../../../../runtime/type.go:/structField // ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f) ot = dnameField(lsym, ot, spkg, f)
ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0) ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1 offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) { if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
@@ -1257,9 +1219,9 @@ func dtypesym(t *types.Type) *obj.LSym {
return lsym return lsym
} }
// ifaceMethodOffset returns the offset of the i-th method in the interface // InterfaceMethodOffset returns the offset of the i-th method in the interface
// type descriptor, ityp. // type descriptor, ityp.
func ifaceMethodOffset(ityp *types.Type, i int64) int64 { func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
// interface type descriptor layout is struct { // interface type descriptor layout is struct {
// _type // commonSize // _type // commonSize
// pkgpath // 1 word // pkgpath // 1 word
@@ -1273,7 +1235,7 @@ func ifaceMethodOffset(ityp *types.Type, i int64) int64 {
// for each itabEntry, gather the methods on // for each itabEntry, gather the methods on
// the concrete type that implement the interface // the concrete type that implement the interface
func peekitabs() { func CompileITabs() {
for i := range itabs { for i := range itabs {
tab := &itabs[i] tab := &itabs[i]
methods := genfun(tab.t, tab.itype) methods := genfun(tab.t, tab.itype)
@@ -1319,11 +1281,11 @@ func genfun(t, it *types.Type) []*obj.LSym {
return out return out
} }
// itabsym uses the information gathered in // ITabSym uses the information gathered in
// peekitabs to de-virtualize interface methods. // peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't // Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc. // generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym { func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
var syms []*obj.LSym var syms []*obj.LSym
if it == nil { if it == nil {
return nil return nil
@@ -1348,24 +1310,15 @@ func itabsym(it *obj.LSym, offset int64) *obj.LSym {
return syms[methodnum] return syms[methodnum]
} }
// addsignat ensures that a runtime type descriptor is emitted for t. // NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
func addsignat(t *types.Type) { func NeedRuntimeType(t *types.Type) {
if _, ok := signatset[t]; !ok { if _, ok := signatset[t]; !ok {
signatset[t] = struct{}{} signatset[t] = struct{}{}
signatslice = append(signatslice, t) signatslice = append(signatslice, t)
} }
} }
func addsignats(dcls []ir.Node) { func WriteRuntimeTypes() {
// copy types from dcl list to signatset
for _, n := range dcls {
if n.Op() == ir.OTYPE {
addsignat(n.Type())
}
}
}
func dumpsignats() {
// Process signatset. Use a loop, as dtypesym adds // Process signatset. Use a loop, as dtypesym adds
// entries to signatset while it is being processed. // entries to signatset while it is being processed.
signats := make([]typeAndStr, len(signatslice)) signats := make([]typeAndStr, len(signatslice))
@@ -1380,15 +1333,15 @@ func dumpsignats() {
sort.Sort(typesByString(signats)) sort.Sort(typesByString(signats))
for _, ts := range signats { for _, ts := range signats {
t := ts.t t := ts.t
dtypesym(t) WriteType(t)
if t.Sym() != nil { if t.Sym() != nil {
dtypesym(types.NewPtr(t)) WriteType(types.NewPtr(t))
} }
} }
} }
} }
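
The loop above is a worklist run to a fixed point: writing one descriptor can discover and enqueue more types, hence the comment about dtypesym adding entries while the set is processed. Stripped of compiler detail, the pattern is (names hypothetical):

package sketch

import "sort"

// drain empties a worklist that emit may refill; sorting each batch
// keeps the output order deterministic, as WriteRuntimeTypes does.
func drain(pending []string, emit func(string) []string) {
	for len(pending) > 0 {
		batch := pending
		pending = nil
		sort.Strings(batch)
		for _, t := range batch {
			pending = append(pending, emit(t)...) // may discover more work
		}
	}
}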
func dumptabs() { func WriteTabs() {
// process itabs // process itabs
for _, i := range itabs { for _, i := range itabs {
// dump empty itab symbol into i.sym // dump empty itab symbol into i.sym
@@ -1399,8 +1352,8 @@ func dumptabs() {
// _ [4]byte // _ [4]byte
// fun [1]uintptr // variable sized // fun [1]uintptr // variable sized
// } // }
o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0) o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0)
o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0) o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0)
o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) { for _, fn := range genfun(i.t, i.itype) {
@@ -1423,7 +1376,7 @@ func dumptabs() {
// typ typeOff // pointer to symbol // typ typeOff // pointer to symbol
// } // }
nsym := dname(p.s.Name, "", nil, true) nsym := dname(p.s.Name, "", nil, true)
tsym := dtypesym(p.t) tsym := WriteType(p.t)
ot = objw.SymPtrOff(s, ot, nsym) ot = objw.SymPtrOff(s, ot, nsym)
ot = objw.SymPtrOff(s, ot, tsym) ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types // Plugin exports symbols as interfaces. Mark their types
@@ -1441,14 +1394,14 @@ func dumptabs() {
} }
} }
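
Each itab symbol written above has the layout spelled out in the code comment; as a plain struct, with the pointer fields flattened to uintptr for the sketch:

package sketch

// itabMirror matches the comment in WriteTabs: interface type, then
// concrete type, a copy of the type hash, 4 bytes of padding, and a
// variable-length method-pointer array declared with one element.
type itabMirror struct {
	inter uintptr // *interfacetype
	_type uintptr // *_type
	hash  uint32  // copy of the type hash
	_     [4]byte
	fun   [1]uintptr // variable sized; method pointers follow inline
}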
func dumpimportstrings() { func WriteImportStrings() {
// generate import strings for imported packages // generate import strings for imported packages
for _, p := range types.ImportedPkgList() { for _, p := range types.ImportedPkgList() {
dimportpath(p) dimportpath(p)
} }
} }
func dumpbasictypes() { func WriteBasicTypes() {
// do basic types if compiling package runtime. // do basic types if compiling package runtime.
// they have to be in at least one package, // they have to be in at least one package,
// and runtime is always loaded implicitly, // and runtime is always loaded implicitly,
@@ -1457,16 +1410,16 @@ func dumpbasictypes() {
// but using runtime means fewer copies in object files. // but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" { if base.Ctxt.Pkgpath == "runtime" {
for i := types.Kind(1); i <= types.TBOOL; i++ { for i := types.Kind(1); i <= types.TBOOL; i++ {
dtypesym(types.NewPtr(types.Types[i])) WriteType(types.NewPtr(types.Types[i]))
} }
dtypesym(types.NewPtr(types.Types[types.TSTRING])) WriteType(types.NewPtr(types.Types[types.TSTRING]))
dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR])) WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string. // emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper. // The latter is the type of an auto-generated wrapper.
dtypesym(types.NewPtr(types.ErrorType)) WriteType(types.NewPtr(types.ErrorType))
dtypesym(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])})) WriteType(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly. // add paths for runtime and main, which 6l imports implicitly.
dimportpath(ir.Pkgs.Runtime) dimportpath(ir.Pkgs.Runtime)
@@ -1611,8 +1564,8 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) {
if t.Width == types.BADWIDTH { if t.Width == types.BADWIDTH {
base.Fatalf("dgcprog: %v badwidth", t) base.Fatalf("dgcprog: %v badwidth", t)
} }
lsym := typesymprefix(".gcprog", t).Linksym() lsym := TypeSymPrefix(".gcprog", t).Linksym()
var p GCProg var p gcProg
p.init(lsym) p.init(lsym)
p.emit(t, 0) p.emit(t, 0)
offset := p.w.BitIndex() * int64(types.PtrSize) offset := p.w.BitIndex() * int64(types.PtrSize)
@@ -1623,13 +1576,13 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) {
return lsym, offset return lsym, offset
} }
type GCProg struct { type gcProg struct {
lsym *obj.LSym lsym *obj.LSym
symoff int symoff int
w gcprog.Writer w gcprog.Writer
} }
func (p *GCProg) init(lsym *obj.LSym) { func (p *gcProg) init(lsym *obj.LSym) {
p.lsym = lsym p.lsym = lsym
p.symoff = 4 // first 4 bytes hold program length p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte) p.w.Init(p.writeByte)
@@ -1639,11 +1592,11 @@ func (p *GCProg) init(lsym *obj.LSym) {
} }
} }
func (p *GCProg) writeByte(x byte) { func (p *gcProg) writeByte(x byte) {
p.symoff = objw.Uint8(p.lsym, p.symoff, x) p.symoff = objw.Uint8(p.lsym, p.symoff, x)
} }
func (p *GCProg) end() { func (p *gcProg) end() {
p.w.End() p.w.End()
objw.Uint32(p.lsym, 0, uint32(p.symoff-4)) objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
@@ -1652,7 +1605,7 @@ func (p *GCProg) end() {
} }
} }
func (p *GCProg) emit(t *types.Type, offset int64) { func (p *gcProg) emit(t *types.Type, offset int64) {
types.CalcSize(t) types.CalcSize(t)
if !t.HasPointers() { if !t.HasPointers() {
return return
@@ -1707,14 +1660,14 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
} }
} }
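
gcProg streams a compact description of which words of a type hold pointers, rather than materializing a full bitmap. A toy version of that idea, encoding a pointer mask as (skip, emit) runs (this illustrates the compression idea only; it is not the runtime's actual gcprog instruction encoding):

package sketch

// runs compresses a pointer bitmap into alternating counts of
// non-pointer words to skip and pointer words to emit.
func runs(ptrmask []bool) (out [][2]int) {
	i := 0
	for i < len(ptrmask) {
		skip := 0
		for i < len(ptrmask) && !ptrmask[i] {
			skip++
			i++
		}
		emit := 0
		for i < len(ptrmask) && ptrmask[i] {
			emit++
			i++
		}
		out = append(out, [2]int{skip, emit})
	}
	return out
}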
// zeroaddr returns the address of a symbol with at least // ZeroAddr returns the address of a symbol with at least
// size bytes of zeros. // size bytes of zeros.
func zeroaddr(size int64) ir.Node { func ZeroAddr(size int64) ir.Node {
if size >= 1<<31 { if size >= 1<<31 {
base.Fatalf("map elem too big %d", size) base.Fatalf("map elem too big %d", size)
} }
if zerosize < size { if ZeroSize < size {
zerosize = size ZeroSize = size
} }
s := ir.Pkgs.Map.Lookup("zero") s := ir.Pkgs.Map.Lookup("zero")
if s.Def == nil { if s.Def == nil {
@@ -1729,3 +1682,155 @@ func zeroaddr(size int64) ir.Node {
z.SetTypecheck(1) z.SetTypecheck(1)
return z return z
} }
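
The zero symbol exists so that code needing "a pointer to size bytes of zeros", notably map reads of large elements, per the Fatalf message, has somewhere to point. A user-level view of the behavior it backs (demo, assuming the element is large enough to take the indirect map-access path):

package main

import "fmt"

func main() {
	// Reading a missing key must still produce a zero value; for
	// large elements the runtime copies it from a shared region of
	// zeros rather than zeroing a fresh temporary each time.
	m := map[string][4096]byte{}
	v := m["absent"]
	fmt.Println(v[0]) // 0
}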
func CollectPTabs() {
if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
return
}
for _, exportn := range typecheck.Target.Exports {
s := exportn.Sym()
nn := ir.AsNode(s.Def)
if nn == nil {
continue
}
if nn.Op() != ir.ONAME {
continue
}
n := nn.(*ir.Name)
if !types.IsExported(s.Name) {
continue
}
if s.Pkg.Name != "main" {
continue
}
if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC {
// function
ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
} else {
// variable
ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
}
}
}
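
ptabs are collected only for dynlinked main packages, i.e. plugins; the name/type pairs gathered here are what the plugin API resolves at run time. A sketch of the consumer side using the standard plugin package (the file and symbol names are hypothetical):

package main

import (
	"fmt"
	"log"
	"plugin"
)

func main() {
	p, err := plugin.Open("demo.so") // built with -buildmode=plugin
	if err != nil {
		log.Fatal(err)
	}
	sym, err := p.Lookup("ExportedVar") // resolved through the exported tables
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sym)
}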
// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
//
// func (t T) M() {
// ...
// }
//
// already exists; this function generates
//
// func (u U) M() {
// u.M()
// }
//
// where the types T and U are such that u.M() is valid
// and calls the T.M method.
// The resulting function is for use in method tables.
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
if false && base.Flag.LowerR != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
// Only generate (*T).M wrappers for T.M in T's own package.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
return
}
// Only generate I.M wrappers for I in I's own package
// but keep doing it for error.Error (was issue #29304).
if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
return
}
base.Pos = base.AutogeneratedPos
typecheck.DeclContext = ir.PEXTERN
tfn := ir.NewFuncType(base.Pos,
ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
typecheck.NewFuncParams(method.Type.Params(), true),
typecheck.NewFuncParams(method.Type.Results(), false))
fn := typecheck.DeclFunc(newnam, tfn)
fn.SetDupok(true)
nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
// generate nil pointer check for better error
if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
// generating wrapper from *T to T.
n := ir.NewIfStmt(base.Pos, nil, nil, nil)
n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
n.Body = []ir.Node{call}
fn.Body.Append(n)
}
dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
// bad scenario is when a local call is made to the wrapper: the wrapper will
// call the implementation, which might be in a different module and so set
// the TOC to the appropriate value for that module. But if it returns
// directly to the wrapper's caller, nothing will reset it to the correct
// value for that function.
if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
left := dot.X // skip final .M
if !left.Type().IsPtr() {
left = typecheck.NodAddr(left)
}
as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
fn.Body.Append(as)
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
call.Args.Set(ir.ParamNames(tfn.Type()))
call.IsDDD = tfn.Type().IsVariadic()
if method.Type.NumResults() > 0 {
ret := ir.NewReturnStmt(base.Pos, nil)
ret.Results = []ir.Node{call}
fn.Body.Append(ret)
} else {
fn.Body.Append(call)
}
}
if false && base.Flag.LowerR != 0 {
ir.DumpList("genwrapper body", fn.Body)
}
typecheck.FinishFuncBody()
if base.Debug.DclStack != 0 {
types.CheckDclstack()
}
typecheck.Func(fn)
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
// TODO(mdempsky): Investigate why we can't enable this more generally.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
inline.InlineCalls(fn)
}
escape.Batch([]*ir.Func{fn}, false)
ir.CurFunc = nil
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
}
var ZeroSize int64