To break up package gc, we need to put the type size calculations somewhere
lower in the import graph, either an existing package or a new one. Package types
already needs this code and is using hacks to get it without an import cycle.
We can remove the hacks and set up for the new package gc by moving the
code into package types itself.
[git-generate]
cd src/cmd/compile/internal/gc
rf '
# Remove old import cycle hacks in gc.
rm TypecheckInit:/types.Widthptr =/-0,/types.Dowidth =/+0 \
../ssa/export_test.go:/types.Dowidth =/-+
ex {
import "cmd/compile/internal/types"
types.Widthptr -> Widthptr
types.Dowidth -> dowidth
}
# Disable CalcSize in tests instead of base.Fatalf
sub dowidth:/base.Fatalf\("dowidth without betypeinit"\)/ \
// Assume this is a test. \
return
# Move size calculation into cmd/compile/internal/types
mv Widthptr PtrSize
mv Widthreg RegSize
mv slicePtrOffset SlicePtrOffset
mv sliceLenOffset SliceLenOffset
mv sliceCapOffset SliceCapOffset
mv sizeofSlice SliceSize
mv sizeofString StringSize
mv skipDowidthForTracing SkipSizeForTracing
mv dowidth CalcSize
mv checkwidth CheckSize
mv widstruct calcStructOffset
mv sizeCalculationDisabled CalcSizeDisabled
mv defercheckwidth DeferCheckSize
mv resumecheckwidth ResumeCheckSize
mv typeptrdata PtrDataSize
mv \
PtrSize RegSize SlicePtrOffset SkipSizeForTracing typePos align.go PtrDataSize \
size.go
mv size.go cmd/compile/internal/types
'
: # Remove old import cycle hacks in types.
cd ../types
rf '
ex {
Widthptr -> PtrSize
Dowidth -> CalcSize
}
rm Widthptr Dowidth
'
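For illustration only (not part of the generated change), a hypothetical call
site in package gc that used the old helpers:

	dowidth(t)
	size := t.Width + int64(Widthptr)

would, after this CL, call the exported API in package types instead:

	types.CalcSize(t)
	size := t.Width + int64(types.PtrSize)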
Change-Id: Ib96cdc6bda2617235480c29392ea5cfb20f60cd8
Reviewed-on: https://go-review.googlesource.com/c/go/+/279234
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"sync"
	"unicode"
	"unicode/utf8"
)

// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
	locals int64
	args   int64
	callee int64
	pos    src.XPos
}

var (
	largeStackFramesMu sync.Mutex // protects largeStackFrames
	largeStackFrames   []largeStack
)

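// lookup returns the symbol with the given name in the local package.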
func lookup(name string) *types.Sym {
	return types.LocalPkg.Lookup(name)
}

// lookupN looks up the symbol starting with prefix and ending with
// the decimal n. If prefix is too long, lookupN panics.
func lookupN(prefix string, n int) *types.Sym {
	var buf [20]byte // plenty long enough for all current users
	copy(buf[:], prefix)
	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
	return types.LocalPkg.LookupBytes(b)
}

// autolabel generates a new Name node for use with
// an automatically generated label.
// prefix is a short mnemonic (e.g. ".s" for switch)
// to help with debugging.
// It should begin with "." to avoid conflicts with
// user labels.
func autolabel(prefix string) *types.Sym {
	if prefix[0] != '.' {
		base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
	}
	fn := ir.CurFunc
	if ir.CurFunc == nil {
		base.Fatalf("autolabel outside function")
	}
	n := fn.Label
	fn.Label++
	return lookupN(prefix, int(n))
}

// dotImports tracks all PkgNames that have been dot-imported.
var dotImports []*ir.PkgName

// dotImportRefs maps idents introduced by importDot back to the
// ir.PkgName they were dot-imported through.
var dotImportRefs map[*ir.Ident]*ir.PkgName

// importDot finds all the exported symbols in the package referenced by pack
// and makes them available in the current package.
func importDot(pack *ir.PkgName) {
	if dotImportRefs == nil {
		dotImportRefs = make(map[*ir.Ident]*ir.PkgName)
	}

	opkg := pack.Pkg
	for _, s := range opkg.Syms {
		if s.Def == nil {
			if _, ok := declImporter[s]; !ok {
				continue
			}
		}
		if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
			continue
		}
		s1 := lookup(s.Name)
		if s1.Def != nil {
			pkgerror := fmt.Sprintf("during import %q", opkg.Path)
			redeclare(base.Pos, s1, pkgerror)
			continue
		}

		id := ir.NewIdent(src.NoXPos, s)
		dotImportRefs[id] = pack
		s1.Def = id
		s1.Block = 1
	}

	dotImports = append(dotImports, pack)
}

// checkDotImports reports errors for any unused dot imports.
func checkDotImports() {
	for _, pack := range dotImports {
		if !pack.Used {
			base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
		}
	}

	// No longer needed; release memory.
	dotImports = nil
	dotImportRefs = nil
}

// nodAddr returns a node representing &n at base.Pos.
func nodAddr(n ir.Node) *ir.AddrExpr {
	return nodAddrAt(base.Pos, n)
}

// nodAddrAt returns a node representing &n at position pos.
func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
	return ir.NewAddrExpr(pos, n)
}

// NewName returns a new ONAME Node associated with symbol s.
func NewName(s *types.Sym) *ir.Name {
	n := ir.NewNameAt(base.Pos, s)
	n.Curfn = ir.CurFunc
	return n
}

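// nodnil returns a nil literal node with type TNIL at base.Pos.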
func nodnil() ir.Node {
	n := ir.NewNilExpr(base.Pos)
	n.SetType(types.Types[types.TNIL])
	return n
}

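// isptrto reports whether t is a pointer to a type of kind et.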
func isptrto(t *types.Type, et types.Kind) bool {
	if t == nil {
		return false
	}
	if !t.IsPtr() {
		return false
	}
	t = t.Elem()
	if t == nil {
		return false
	}
	if t.Kind() != et {
		return false
	}
	return true
}

// Is type src assignment compatible to type dst?
// If so, return op code to use in conversion.
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
func assignop(src, dst *types.Type) (ir.Op, string) {
	if src == dst {
		return ir.OCONVNOP, ""
	}
	if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
		return ir.OXXX, ""
	}

	// 1. src type is identical to dst.
	if types.Identical(src, dst) {
		return ir.OCONVNOP, ""
	}

	// 2. src and dst have identical underlying types
	// and either src or dst is not a named type or
	// both are empty interface types.
	// For assignable but different non-empty interface types,
	// we want to recompute the itab. Recomputing the itab ensures
	// that itabs are unique (thus an interface with a compile-time
	// type I has an itab with interface type I).
	if types.Identical(src.Underlying(), dst.Underlying()) {
		if src.IsEmptyInterface() {
			// Conversion between two empty interfaces
			// requires no code.
			return ir.OCONVNOP, ""
		}
		if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
			// Conversion between two types, at least one unnamed,
			// needs no conversion. The exception is nonempty interfaces
			// which need to have their itab updated.
			return ir.OCONVNOP, ""
		}
	}

	// 3. dst is an interface type and src implements dst.
	if dst.IsInterface() && src.Kind() != types.TNIL {
		var missing, have *types.Field
		var ptr int
		if implements(src, dst, &missing, &have, &ptr) {
			// Call itabname so that (src, dst)
			// gets added to itabs early, which allows
			// us to de-virtualize calls through this
			// type/interface pair later. See peekitabs in reflect.go
			if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
				NeedITab(src, dst)
			}

			return ir.OCONVIFACE, ""
		}

		// we'll have complained about this method anyway, suppress spurious messages.
		if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
			return ir.OCONVIFACE, ""
		}

		var why string
		if isptrto(src, types.TINTER) {
			why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
		} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
		} else if have != nil && have.Sym == missing.Sym {
			why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
		} else if ptr != 0 {
			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
		} else if have != nil {
			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
		} else {
			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
		}

		return ir.OXXX, why
	}

	if isptrto(dst, types.TINTER) {
		why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
		return ir.OXXX, why
	}

	if src.IsInterface() && dst.Kind() != types.TBLANK {
		var missing, have *types.Field
		var ptr int
		var why string
		if implements(dst, src, &missing, &have, &ptr) {
			why = ": need type assertion"
		}
		return ir.OXXX, why
	}

	// 4. src is a bidirectional channel value, dst is a channel type,
	// src and dst have identical element types, and
	// either src or dst is not a named type.
	if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
		if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
			return ir.OCONVNOP, ""
		}
	}

	// 5. src is the predeclared identifier nil and dst is a nillable type.
	if src.Kind() == types.TNIL {
		switch dst.Kind() {
		case types.TPTR,
			types.TFUNC,
			types.TMAP,
			types.TCHAN,
			types.TINTER,
			types.TSLICE:
			return ir.OCONVNOP, ""
		}
	}

	// 6. rule about untyped constants - already converted by defaultlit.

	// 7. Any typed value can be assigned to the blank identifier.
	if dst.Kind() == types.TBLANK {
		return ir.OCONVNOP, ""
	}

	return ir.OXXX, ""
}

// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
	if src == dst {
		return ir.OCONVNOP, ""
	}
	if src == nil || dst == nil {
		return ir.OXXX, ""
	}

	// Conversions from regular to go:notinheap are not allowed
	// (unless it's unsafe.Pointer). These are runtime-specific
	// rules.
	// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
	if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
		return ir.OXXX, why
	}
	// (b) Disallow string to []T where T is go:notinheap.
	if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
		return ir.OXXX, why
	}

	// 1. src can be assigned to dst.
	op, why := assignop(src, dst)
	if op != ir.OXXX {
		return op, why
	}

	// The rules for interfaces are no different in conversions
	// than assignments. If interfaces are involved, stop now
	// with the good message from assignop.
	// Otherwise clear the error.
	if src.IsInterface() || dst.IsInterface() {
		return ir.OXXX, why
	}

	// 2. Ignoring struct tags, src and dst have identical underlying types.
	if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
		return ir.OCONVNOP, ""
	}

	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
	// their base types have identical underlying types.
	if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
		if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
			return ir.OCONVNOP, ""
		}
	}

	// 4. src and dst are both integer or floating point types.
	if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
			return ir.OCONVNOP, ""
		}
		return ir.OCONV, ""
	}

	// 5. src and dst are both complex types.
	if src.IsComplex() && dst.IsComplex() {
		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
			return ir.OCONVNOP, ""
		}
		return ir.OCONV, ""
	}

	// Special case for constant conversions: any numeric
	// conversion is potentially okay. We'll validate further
	// within evconst. See #38117.
	if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
		return ir.OCONV, ""
	}

	// 6. src is an integer or has type []byte or []rune
	// and dst is a string type.
	if src.IsInteger() && dst.IsString() {
		return ir.ORUNESTR, ""
	}

	if src.IsSlice() && dst.IsString() {
		if src.Elem().Kind() == types.ByteType.Kind() {
			return ir.OBYTES2STR, ""
		}
		if src.Elem().Kind() == types.RuneType.Kind() {
			return ir.ORUNES2STR, ""
		}
	}

	// 7. src is a string and dst is []byte or []rune.
	// String to slice.
	if src.IsString() && dst.IsSlice() {
		if dst.Elem().Kind() == types.ByteType.Kind() {
			return ir.OSTR2BYTES, ""
		}
		if dst.Elem().Kind() == types.RuneType.Kind() {
			return ir.OSTR2RUNES, ""
		}
	}

	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
	if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
		return ir.OCONVNOP, ""
	}

	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
	if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
		return ir.OCONVNOP, ""
	}

	// src is map and dst is a pointer to corresponding hmap.
	// This rule is needed for the implementation detail that
	// go gc maps are implemented as a pointer to a hmap struct.
	if src.Kind() == types.TMAP && dst.IsPtr() &&
		src.MapType().Hmap == dst.Elem() {
		return ir.OCONVNOP, ""
	}

	return ir.OXXX, ""
}

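// assignconv converts node n for assignment to type t, using context to
// describe the assignment in any error message.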
func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
	return assignconvfn(n, t, func() string { return context })
}

// Convert node n for assignment to type t.
func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
	if n == nil || n.Type() == nil || n.Type().Broke() {
		return n
	}

	if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
		base.Errorf("use of untyped nil")
	}

	n = convlit1(n, t, false, context)
	if n.Type() == nil {
		return n
	}
	if t.Kind() == types.TBLANK {
		return n
	}

	// Convert ideal bool from comparison to plain bool
	// if the next step is non-bool (like interface{}).
	if n.Type() == types.UntypedBool && !t.IsBoolean() {
		if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
			r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
			r.SetType(types.Types[types.TBOOL])
			r.SetTypecheck(1)
			r.SetImplicit(true)
			n = r
		}
	}

	if types.Identical(n.Type(), t) {
		return n
	}

	op, why := assignop(n.Type(), t)
	if op == ir.OXXX {
		base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
		op = ir.OCONV
	}

	r := ir.NewConvExpr(base.Pos, op, t, n)
	r.SetTypecheck(1)
	r.SetImplicit(true)
	return r
}

// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
	var init ir.Nodes
	c := cheapexpr(n, &init)
	if c != n || len(init) != 0 {
		base.Fatalf("backingArrayPtrLen not cheap: %v", n)
	}
	ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
	if n.Type().IsString() {
		ptr.SetType(types.Types[types.TUINT8].PtrTo())
	} else {
		ptr.SetType(n.Type().Elem().PtrTo())
	}
	length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
	length.SetType(types.Types[types.TINT])
	return ptr, length
}

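// syslook looks up name in the runtime package and returns its declared Name
// node; it is a fatal error if the symbol is not defined.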
func syslook(name string) *ir.Name {
	s := ir.Pkgs.Runtime.Lookup(name)
	if s == nil || s.Def == nil {
		base.Fatalf("syslook: can't find runtime.%s", name)
	}
	return ir.AsNode(s.Def).(*ir.Name)
}

// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
func updateHasCall(n ir.Node) {
	if n == nil {
		return
	}
	n.SetHasCall(calcHasCall(n))
}

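// calcHasCall reports whether expression n (or its init list) contains any function calls.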
func calcHasCall(n ir.Node) bool {
	if len(n.Init()) != 0 {
		// TODO(mdempsky): This seems overly conservative.
		return true
	}

	switch n.Op() {
	default:
		base.Fatalf("calcHasCall %+v", n)
		panic("unreachable")

	case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
		if n.HasCall() {
			base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
		}
		return false
	case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
		return true
	case ir.OANDAND, ir.OOROR:
		// hard with instrumented code
		n := n.(*ir.LogicalExpr)
		if base.Flag.Cfg.Instrumenting {
			return true
		}
		return n.X.HasCall() || n.Y.HasCall()
	case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
		ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
		// These ops might panic, make sure they are done
		// before we start marshaling args for a call. See issue 16760.
		return true

	// When using soft-float, these ops might be rewritten to function calls
	// so we ensure they are evaluated first.
	case ir.OADD, ir.OSUB, ir.OMUL:
		n := n.(*ir.BinaryExpr)
		if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
			return true
		}
		return n.X.HasCall() || n.Y.HasCall()
	case ir.ONEG:
		n := n.(*ir.UnaryExpr)
		if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
			return true
		}
		return n.X.HasCall()
	case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
		n := n.(*ir.BinaryExpr)
		if thearch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
			return true
		}
		return n.X.HasCall() || n.Y.HasCall()
	case ir.OCONV:
		n := n.(*ir.ConvExpr)
		if thearch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
			return true
		}
		return n.X.HasCall()

	case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
		n := n.(*ir.BinaryExpr)
		return n.X.HasCall() || n.Y.HasCall()

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		return n.X.HasCall() || n.Y != nil && n.Y.HasCall()

	case ir.OADDR:
		n := n.(*ir.AddrExpr)
		return n.X.HasCall()
	case ir.OPAREN:
		n := n.(*ir.ParenExpr)
		return n.X.HasCall()
	case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
		ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
		ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
		ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
		n := n.(*ir.UnaryExpr)
		return n.X.HasCall()
	case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
		n := n.(*ir.SelectorExpr)
		return n.X.HasCall()

	case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
		return false

	// TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
	case ir.OADDSTR:
		// TODO(rsc): This used to check left and right, which are not part of OADDSTR.
		return false
	case ir.OBLOCK:
		// TODO(rsc): Surely the block's statements matter.
		return false
	case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
		// TODO(rsc): Some conversions are themselves calls, no?
		n := n.(*ir.ConvExpr)
		return n.X.HasCall()
	case ir.ODOTTYPE2:
		// TODO(rsc): Shouldn't this be up with ODOTTYPE above?
		n := n.(*ir.TypeAssertExpr)
		return n.X.HasCall()
	case ir.OSLICEHEADER:
		// TODO(rsc): What about len and cap?
		n := n.(*ir.SliceHeaderExpr)
		return n.Ptr.HasCall()
	case ir.OAS2DOTTYPE, ir.OAS2FUNC:
		// TODO(rsc): Surely we need to check List and Rlist.
		return false
	}
}

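// badtype reports an "illegal types for operand" error for op applied to
// operand types tl and tr.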
func badtype(op ir.Op, tl, tr *types.Type) {
	var s string
	if tl != nil {
		s += fmt.Sprintf("\n\t%v", tl)
	}
	if tr != nil {
		s += fmt.Sprintf("\n\t%v", tr)
	}

	// common mistake: *struct and *interface.
	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
			s += "\n\t(*struct vs *interface)"
		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
			s += "\n\t(*interface vs *struct)"
		}
	}

	base.Errorf("illegal types for operand: %v%s", op, s)
}

// brcom returns !(op).
// For example, brcom(==) is !=.
func brcom(op ir.Op) ir.Op {
	switch op {
	case ir.OEQ:
		return ir.ONE
	case ir.ONE:
		return ir.OEQ
	case ir.OLT:
		return ir.OGE
	case ir.OGT:
		return ir.OLE
	case ir.OLE:
		return ir.OGT
	case ir.OGE:
		return ir.OLT
	}
	base.Fatalf("brcom: no com for %v\n", op)
	return op
}

// brrev returns reverse(op).
// For example, brrev(<) is >.
func brrev(op ir.Op) ir.Op {
	switch op {
	case ir.OEQ:
		return ir.OEQ
	case ir.ONE:
		return ir.ONE
	case ir.OLT:
		return ir.OGT
	case ir.OGT:
		return ir.OLT
	case ir.OLE:
		return ir.OGE
	case ir.OGE:
		return ir.OLE
	}
	base.Fatalf("brrev: no rev for %v\n", op)
	return op
}

// safeexpr returns a side effect-free n, appending side effects to init.
// The result is assignable if n is.
func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
	if n == nil {
		return nil
	}

	if len(n.Init()) != 0 {
		walkstmtlist(n.Init())
		init.Append(n.PtrInit().Take()...)
	}

	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
		return n

	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := safeexpr(n.X, init)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.UnaryExpr)
		a.X = l
		return walkexpr(typecheck(a, ctxExpr), init)

	case ir.ODOT, ir.ODOTPTR:
		n := n.(*ir.SelectorExpr)
		l := safeexpr(n.X, init)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.SelectorExpr)
		a.X = l
		return walkexpr(typecheck(a, ctxExpr), init)

	case ir.ODEREF:
		n := n.(*ir.StarExpr)
		l := safeexpr(n.X, init)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.StarExpr)
		a.X = l
		return walkexpr(typecheck(a, ctxExpr), init)

	case ir.OINDEX, ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		l := safeexpr(n.X, init)
		r := safeexpr(n.Index, init)
		if l == n.X && r == n.Index {
			return n
		}
		a := ir.Copy(n).(*ir.IndexExpr)
		a.X = l
		a.Index = r
		return walkexpr(typecheck(a, ctxExpr), init)

	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
		n := n.(*ir.CompLitExpr)
		if isStaticCompositeLiteral(n) {
			return n
		}
	}

	// make a copy; must not be used as an lvalue
	if ir.IsAssignable(n) {
		base.Fatalf("missing lvalue case in safeexpr: %v", n)
	}
	return cheapexpr(n, init)
}

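// copyexpr copies n into a fresh temporary of type t, appending the assignment
// to init, and returns the temporary.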
func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
	l := temp(t)
	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
	return l
}

// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n
	}

	return copyexpr(n, n.Type(), init)
}

// Code to resolve elided DOTs in embedded types.

// A Dlist stores a pointer to a TFIELD Type embedded within
// a TSTRUCT or TINTER Type.
type Dlist struct {
	field *types.Field
}

// dotlist is used by adddot1 to record the path of embedded fields
// used to access a target field or method.
// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
var dotlist = make([]Dlist, 10)

// lookdot0 returns the number of fields or methods named s associated
// with Type t. If exactly one exists, it will be returned in *save
// (if save is not nil).
func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
	u := t
	if u.IsPtr() {
		u = u.Elem()
	}

	c := 0
	if u.IsStruct() || u.IsInterface() {
		for _, f := range u.Fields().Slice() {
			if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
				if save != nil {
					*save = f
				}
				c++
			}
		}
	}

	u = t
	if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
		// If t is a defined pointer type, then x.m is shorthand for (*x).m.
		u = t.Elem()
	}
	u = types.ReceiverBaseType(u)
	if u != nil {
		for _, f := range u.Methods().Slice() {
			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
				if save != nil {
					*save = f
				}
				c++
			}
		}
	}

	return c
}

// adddot1 returns the number of fields or methods named s at depth d in Type t.
// If exactly one exists, it will be returned in *save (if save is not nil),
// and dotlist will contain the path of embedded fields traversed to find it,
// in reverse order. If none exist, more will indicate whether t contains any
// embedded fields at depth d, so callers can decide whether to retry at
// a greater depth.
func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
	if t.Recur() {
		return
	}
	t.SetRecur(true)
	defer t.SetRecur(false)

	var u *types.Type
	d--
	if d < 0 {
		// We've reached our target depth. If t has any fields/methods
		// named s, then we're done. Otherwise, we still need to check
		// below for embedded fields.
		c = lookdot0(s, t, save, ignorecase)
		if c != 0 {
			return c, false
		}
	}

	u = t
	if u.IsPtr() {
		u = u.Elem()
	}
	if !u.IsStruct() && !u.IsInterface() {
		return c, false
	}

	for _, f := range u.Fields().Slice() {
		if f.Embedded == 0 || f.Sym == nil {
			continue
		}
		if d < 0 {
			// Found an embedded field at target depth.
			return c, true
		}
		a, more1 := adddot1(s, f.Type, d, save, ignorecase)
		if a != 0 && c == 0 {
			dotlist[d].field = f
		}
		c += a
		if more1 {
			more = true
		}
	}

	return c, more
}

// dotpath computes the unique shortest explicit selector path to fully qualify
// a selection expression x.f, where x is of type t and f is the symbol s.
// If no such path exists, dotpath returns nil.
// If there are multiple shortest paths to the same depth, ambig is true.
func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) {
	// The embedding of types within structs imposes a tree structure onto
	// types: structs parent the types they embed, and types parent their
	// fields or methods. Our goal here is to find the shortest path to
	// a field or method named s in the subtree rooted at t. To accomplish
	// that, we iteratively perform depth-first searches of increasing depth
	// until we either find the named field/method or exhaust the tree.
	for d := 0; ; d++ {
		if d > len(dotlist) {
			dotlist = append(dotlist, Dlist{})
		}
		if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
			return dotlist[:d], false
		} else if c > 1 {
			return nil, true
		} else if !more {
			return nil, false
		}
	}
}

// in T.field
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n *ir.SelectorExpr) *ir.SelectorExpr {
	n.X = typecheck(n.X, ctxType|ctxExpr)
	if n.X.Diag() {
		n.SetDiag(true)
	}
	t := n.X.Type()
	if t == nil {
		return n
	}

	if n.X.Op() == ir.OTYPE {
		return n
	}

	s := n.Sel
	if s == nil {
		return n
	}

	switch path, ambig := dotpath(s, t, nil, false); {
	case path != nil:
		// rebuild elided dots
		for c := len(path) - 1; c >= 0; c-- {
			dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym)
			dot.SetImplicit(true)
			dot.SetType(path[c].field.Type)
			n.X = dot
		}
	case ambig:
		base.Errorf("ambiguous selector %v", n)
		n.X = nil
	}

	return n
}

// Code to help generate trampoline functions for methods on embedded
// types. These are approx the same as the corresponding adddot
// routines except that they expect to be called with unique tasks and
// they return the actual methods.

type Symlink struct {
	field *types.Field
}

var slist []Symlink

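// expand0 appends the methods of t (or, for an interface, its interface
// methods) to slist, marking each symbol so it is recorded only once.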
func expand0(t *types.Type) {
	u := t
	if u.IsPtr() {
		u = u.Elem()
	}

	if u.IsInterface() {
		for _, f := range u.Fields().Slice() {
			if f.Sym.Uniq() {
				continue
			}
			f.Sym.SetUniq(true)
			slist = append(slist, Symlink{field: f})
		}

		return
	}

	u = types.ReceiverBaseType(t)
	if u != nil {
		for _, f := range u.Methods().Slice() {
			if f.Sym.Uniq() {
				continue
			}
			f.Sym.SetUniq(true)
			slist = append(slist, Symlink{field: f})
		}
	}
}

func expand1(t *types.Type, top bool) {
	if t.Recur() {
		return
	}
	t.SetRecur(true)

	if !top {
		expand0(t)
	}

	u := t
	if u.IsPtr() {
		u = u.Elem()
	}

	if u.IsStruct() || u.IsInterface() {
		for _, f := range u.Fields().Slice() {
			if f.Embedded == 0 {
				continue
			}
			if f.Sym == nil {
				continue
			}
			expand1(f.Type, false)
		}
	}

	t.SetRecur(false)
}

func expandmeth(t *types.Type) {
	if t == nil || t.AllMethods().Len() != 0 {
		return
	}

	// mark top-level method symbols
	// so that expand1 doesn't consider them.
	for _, f := range t.Methods().Slice() {
		f.Sym.SetUniq(true)
	}

	// generate all reachable methods
	slist = slist[:0]
	expand1(t, true)

	// check each method to be uniquely reachable
	var ms []*types.Field
	for i, sl := range slist {
		slist[i].field = nil
		sl.field.Sym.SetUniq(false)

		var f *types.Field
		path, _ := dotpath(sl.field.Sym, t, &f, false)
		if path == nil {
			continue
		}

		// dotpath may have dug out arbitrary fields, we only want methods.
		if !f.IsMethod() {
			continue
		}

		// add it to the base type method list
		f = f.Copy()
		f.Embedded = 1 // needs a trampoline
		for _, d := range path {
			if d.field.Type.IsPtr() {
				f.Embedded = 2
				break
			}
		}
		ms = append(ms, f)
	}

	for _, f := range t.Methods().Slice() {
		f.Sym.SetUniq(false)
	}

	ms = append(ms, t.Methods().Slice()...)
	sort.Sort(types.MethodsByName(ms))
	t.AllMethods().Set(ms)
}

// Given funarg struct list, return list of fn args.
func structargs(tl *types.Type, mustname bool) []*ir.Field {
	var args []*ir.Field
	gen := 0
	for _, t := range tl.Fields().Slice() {
		s := t.Sym
		if mustname && (s == nil || s.Name == "_") {
			// invent a name so that we can refer to it in the trampoline
			s = lookupN(".anon", gen)
			gen++
		}
		a := ir.NewField(base.Pos, s, nil, t.Type)
		a.Pos = t.Pos
		a.IsDDD = t.IsDDD()
		args = append(args, a)
	}

	return args
}

// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
//
// func (t T) M() {
// ...
// }
//
// already exists; this function generates
//
// func (u U) M() {
// u.M()
// }
//
// where the types T and U are such that u.M() is valid
// and calls the T.M method.
// The resulting function is for use in method tables.
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
	if false && base.Flag.LowerR != 0 {
		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
	}

	// Only generate (*T).M wrappers for T.M in T's own package.
	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
		rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
		return
	}

	// Only generate I.M wrappers for I in I's own package
	// but keep doing it for error.Error (was issue #29304).
	if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
		return
	}

	base.Pos = base.AutogeneratedPos
	dclcontext = ir.PEXTERN

	tfn := ir.NewFuncType(base.Pos,
		ir.NewField(base.Pos, lookup(".this"), nil, rcvr),
		structargs(method.Type.Params(), true),
		structargs(method.Type.Results(), false))

	fn := dclfunc(newnam, tfn)
	fn.SetDupok(true)

	nthis := ir.AsNode(tfn.Type().Recv().Nname)

	methodrcvr := method.Type.Recv().Type

	// generate nil pointer check for better error
	if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
		// generating wrapper from *T to T.
		n := ir.NewIfStmt(base.Pos, nil, nil, nil)
		n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, nodnil())
		call := ir.NewCallExpr(base.Pos, ir.OCALL, syslook("panicwrap"), nil)
		n.Body = []ir.Node{call}
		fn.Body.Append(n)
	}

	dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))

	// generate call
	// It's not possible to use a tail call when dynamic linking on ppc64le. The
	// bad scenario is when a local call is made to the wrapper: the wrapper will
	// call the implementation, which might be in a different module and so set
	// the TOC to the appropriate value for that module. But if it returns
	// directly to the wrapper's caller, nothing will reset it to the correct
	// value for that function.
	if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
		// generate tail call: adjust pointer receiver and jump to embedded method.
		left := dot.X // skip final .M
		if !left.Type().IsPtr() {
			left = nodAddr(left)
		}
		as := ir.NewAssignStmt(base.Pos, nthis, convnop(left, rcvr))
		fn.Body.Append(as)
		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
	} else {
		fn.SetWrapper(true) // ignore frame for panic+recover matching
		call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
		call.Args.Set(ir.ParamNames(tfn.Type()))
		call.IsDDD = tfn.Type().IsVariadic()
		if method.Type.NumResults() > 0 {
			ret := ir.NewReturnStmt(base.Pos, nil)
			ret.Results = []ir.Node{call}
			fn.Body.Append(ret)
		} else {
			fn.Body.Append(call)
		}
	}

	if false && base.Flag.LowerR != 0 {
		ir.DumpList("genwrapper body", fn.Body)
	}

	funcbody()
	if base.Debug.DclStack != 0 {
		types.CheckDclstack()
	}

	typecheckFunc(fn)
	ir.CurFunc = fn
	typecheckslice(fn.Body, ctxStmt)

	// Inline calls within (*T).M wrappers. This is safe because we only
	// generate those wrappers within the same compilation unit as (T).M.
	// TODO(mdempsky): Investigate why we can't enable this more generally.
	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
		inlcalls(fn)
	}
	escapeFuncs([]*ir.Func{fn}, false)

	ir.CurFunc = nil
	Target.Decls = append(Target.Decls, fn)
}

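// hashmem returns a node referring to the runtime memhash function, typed as
// func(*t, uintptr, uintptr) uintptr.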
func hashmem(t *types.Type) ir.Node {
	sym := ir.Pkgs.Runtime.Lookup("memhash")

	n := NewName(sym)
	ir.MarkFunc(n)
	n.SetType(functype(nil, []*ir.Field{
		ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
	}, []*ir.Field{
		ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
	}))
	return n
}

func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
	if t == nil {
		return nil, false
	}

	path, ambig := dotpath(s, t, &m, ignorecase)
	if path == nil {
		if ambig {
			base.Errorf("%v.%v is ambiguous", t, s)
		}
		return nil, false
	}

	for _, d := range path {
		if d.field.Type.IsPtr() {
			followptr = true
			break
		}
	}

	if !m.IsMethod() {
		base.Errorf("%v.%v is a field, not a method", t, s)
		return nil, followptr
	}

	return m, followptr
}

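// implements reports whether t implements the interface type iface. On
// failure it sets *m to the missing interface method, *samename to a method
// of t with the same name but a different type (if any), and *ptr to 1 when
// the failure is only because the method has a pointer receiver.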
func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
	t0 := t
	if t == nil {
		return false
	}

	if t.IsInterface() {
		i := 0
		tms := t.Fields().Slice()
		for _, im := range iface.Fields().Slice() {
			for i < len(tms) && tms[i].Sym != im.Sym {
				i++
			}
			if i == len(tms) {
				*m = im
				*samename = nil
				*ptr = 0
				return false
			}
			tm := tms[i]
			if !types.Identical(tm.Type, im.Type) {
				*m = im
				*samename = tm
				*ptr = 0
				return false
			}
		}

		return true
	}

	t = types.ReceiverBaseType(t)
	var tms []*types.Field
	if t != nil {
		expandmeth(t)
		tms = t.AllMethods().Slice()
	}
	i := 0
	for _, im := range iface.Fields().Slice() {
		if im.Broke() {
			continue
		}
		for i < len(tms) && tms[i].Sym != im.Sym {
			i++
		}
		if i == len(tms) {
			*m = im
			*samename, _ = ifacelookdot(im.Sym, t, true)
			*ptr = 0
			return false
		}
		tm := tms[i]
		if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
			*m = im
			*samename = tm
			*ptr = 0
			return false
		}
		followptr := tm.Embedded == 2

		// if pointer receiver in method,
		// the method does not exist for value types.
		rcvr := tm.Type.Recv().Type
		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
			if false && base.Flag.LowerR != 0 {
				base.Errorf("interface pointer mismatch")
			}

			*m = im
			*samename = nil
			*ptr = 1
			return false
		}
	}

	return true
}

func ngotype(n ir.Node) *types.Sym {
	if n.Type() != nil {
		return typenamesym(n.Type())
	}
	return nil
}

// The linker uses the magic symbol prefixes "go." and "type."
// Avoid potential confusion between import paths and symbols
// by rejecting these reserved imports for now. Also, people
// "can do weird things in GOPATH and we'd prefer they didn't
// do _that_ weird thing" (per rsc). See also #4257.
var reservedimports = []string{
	"go",
	"type",
}

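// isbadimport reports whether path is an invalid import path, printing an
// error if so; allowSpace permits space characters in the path.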
func isbadimport(path string, allowSpace bool) bool {
	if strings.Contains(path, "\x00") {
		base.Errorf("import path contains NUL")
		return true
	}

	for _, ri := range reservedimports {
		if path == ri {
			base.Errorf("import path %q is reserved and cannot be used", path)
			return true
		}
	}

	for _, r := range path {
		if r == utf8.RuneError {
			base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
			return true
		}

		if r < 0x20 || r == 0x7f {
			base.Errorf("import path contains control character: %q", path)
			return true
		}

		if r == '\\' {
			base.Errorf("import path contains backslash; use slash: %q", path)
			return true
		}

		if !allowSpace && unicode.IsSpace(r) {
			base.Errorf("import path contains space character: %q", path)
			return true
		}

		if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
			base.Errorf("import path contains invalid character '%c': %q", r, path)
			return true
		}
	}

	return false
}

// itabType loads the _type field from a runtime.itab struct.
func itabType(itab ir.Node) ir.Node {
	typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
	typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
	typ.SetTypecheck(1)
	typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab
	typ.SetBounded(true)              // guaranteed not to fault
	return typ
}

// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
	if t.IsInterface() {
		base.Fatalf("ifaceData interface: %v", t)
	}
	ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
	if types.IsDirectIface(t) {
		ptr.SetType(t)
		ptr.SetTypecheck(1)
		return ptr
	}
	ptr.SetType(types.NewPtr(t))
	ptr.SetTypecheck(1)
	ind := ir.NewStarExpr(pos, ptr)
	ind.SetType(t)
	ind.SetTypecheck(1)
	ind.SetBounded(true)
	return ind
}