2021-06-22 01:26:34 -07:00
|
|
|
// Copyright 2018 The Go Authors. All rights reserved.
|
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
|
|
package escape
|
|
|
|
|
|
|
|
|
|
import (
|
|
|
|
|
"cmd/compile/internal/base"
|
|
|
|
|
"cmd/compile/internal/ir"
|
|
|
|
|
"cmd/compile/internal/typecheck"
|
|
|
|
|
"cmd/compile/internal/types"
|
2021-06-21 22:35:01 -07:00
|
|
|
"cmd/internal/src"
|
2021-06-22 01:26:34 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// call evaluates a call expressions, including builtin calls. ks
|
|
|
|
|
// should contain the holes representing where the function callee's
|
2021-06-22 20:53:14 -07:00
|
|
|
// results flows.
|
|
|
|
|
func (e *escape) call(ks []hole, call ir.Node) {
|
2021-06-21 22:35:01 -07:00
|
|
|
var init ir.Nodes
|
|
|
|
|
e.callCommon(ks, call, &init, nil)
|
|
|
|
|
if len(init) != 0 {
|
|
|
|
|
call.(*ir.CallExpr).PtrInit().Append(init...)
|
|
|
|
|
}
|
2021-06-22 20:53:14 -07:00
|
|
|
}
|
2021-06-22 01:26:34 -07:00
|
|
|
|
2021-06-21 22:35:01 -07:00
|
|
|
// callCommon is the shared implementation behind call and goDeferStmt.
// ks holds the holes for the callee's results (nil when results are
// unused); init accumulates rewrite statements produced while
// processing arguments; wrapper, if non-nil, is the go/defer wrapper
// function that arguments are being captured into.
func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {

	// argumentFunc handles escape analysis of argument *argp to the
	// given hole. If the function callee is known, fn is the callee;
	// otherwise nil. The argument may first be rewritten (e.g. copied
	// into a temp for go/defer wrapping) before its data flow is
	// recorded.
	argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
		e.rewriteArgument(argp, init, call, fn, wrapper)

		e.expr(k.note(call, "call parameter"), *argp)
	}

	// argument is argumentFunc for arguments with no statically-known
	// callee (builtins, the callee expression itself, etc.).
	argument := func(k hole, argp *ir.Node) {
		argumentFunc(nil, k, argp)
	}

	switch call.Op() {
	default:
		ir.Dump("esc", call)
		base.Fatalf("unexpected call op: %v", call.Op())

	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
		call := call.(*ir.CallExpr)
		typecheck.FixVariadicCall(call)

		// Pick out the function callee, if statically known.
		//
		// TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
		// functions (e.g., runtime builtins, method wrappers, generated
		// eq/hash functions) don't have it set. Investigate whether
		// that's a concern.
		var fn *ir.Name
		switch call.Op() {
		case ir.OCALLFUNC:
			switch v := ir.StaticValue(call.X); {
			case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC:
				fn = v.(*ir.Name)
			case v.Op() == ir.OCLOSURE:
				fn = v.(*ir.ClosureExpr).Func.Nname
			}
		case ir.OCALLMETH:
			fn = ir.MethodExprName(call.X)
		}

		// Prefer the callee's own signature when known; otherwise fall
		// back to the (possibly interface/func-value) expression type.
		fntype := call.X.Type()
		if fn != nil {
			fntype = fn.Type()
		}

		// If fn is being analyzed in the same batch, connect its result
		// parameters directly to the result holes instead of relying on
		// (not yet computed) escape tags.
		if ks != nil && fn != nil && e.inMutualBatch(fn) {
			for i, result := range fn.Type().Results().FieldSlice() {
				e.expr(ks[i], ir.AsNode(result.Nname))
			}
		}

		if r := fntype.Recv(); r != nil {
			// Method call: the receiver is the argument for the recv
			// parameter.
			dot := call.X.(*ir.SelectorExpr)
			argumentFunc(fn, e.tagHole(ks, fn, r), &dot.X)
		} else {
			// Evaluate callee function expression.
			//
			// Note: We use argument and not argumentFunc, because call.X
			// here may be an argument to runtime.{new,defer}proc, but it's
			// not an argument to fn itself.
			argument(e.discardHole(), &call.X)
		}

		args := call.Args
		for i, param := range fntype.Params().FieldSlice() {
			argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
		}

	case ir.OAPPEND:
		call := call.(*ir.CallExpr)
		args := call.Args

		// Appendee slice may flow directly to the result, if
		// it has enough capacity. Alternatively, a new heap
		// slice might be allocated, and all slice elements
		// might flow to heap.
		appendeeK := ks[0]
		if args[0].Type().Elem().HasPointers() {
			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
		}
		argument(appendeeK, &args[0])

		if call.IsDDD {
			// append(x, y...): the spread slice's elements may be copied
			// into a heap-allocated backing array.
			appendedK := e.discardHole()
			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
				appendedK = e.heapHole().deref(call, "appended slice...")
			}
			argument(appendedK, &args[1])
		} else {
			// append(x, a, b, ...): each appended element may end up in a
			// heap-allocated backing array.
			for i := 1; i < len(args); i++ {
				argument(e.heapHole(), &args[i])
			}
		}

	case ir.OCOPY:
		call := call.(*ir.BinaryExpr)
		argument(e.discardHole(), &call.X)

		// Elements copied out of the source slice may be stored into the
		// destination, which may itself be heap-allocated.
		copiedK := e.discardHole()
		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
			copiedK = e.heapHole().deref(call, "copied slice")
		}
		argument(copiedK, &call.Y)

	case ir.OPANIC:
		// The panic argument is recovered via recover(), so it escapes.
		call := call.(*ir.UnaryExpr)
		argument(e.heapHole(), &call.X)

	case ir.OCOMPLEX:
		call := call.(*ir.BinaryExpr)
		argument(e.discardHole(), &call.X)
		argument(e.discardHole(), &call.Y)

	case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
		call := call.(*ir.CallExpr)
		fixRecoverCall(call)
		for i := range call.Args {
			argument(e.discardHole(), &call.Args[i])
		}

	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
		call := call.(*ir.UnaryExpr)
		argument(e.discardHole(), &call.X)

	case ir.OUNSAFEADD, ir.OUNSAFESLICE:
		// First operand (pointer/slice base) flows to the result; the
		// integer operand is discarded.
		call := call.(*ir.BinaryExpr)
		argument(ks[0], &call.X)
		argument(e.discardHole(), &call.Y)
	}
}
|
|
|
|
|
|
2021-06-21 22:35:01 -07:00
|
|
|
// goDeferStmt analyzes a "go" or "defer" statement.
//
// In the process, it also normalizes the statement to always use a
// simple function call with no arguments and no results. For example,
// it rewrites:
//
//	defer f(x, y)
//
// into:
//
//	x1, y1 := x, y
//	defer func() { f(x1, y1) }()
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
	k := e.heapHole()
	if n.Op() == ir.ODEFER && e.loopDepth == 1 {
		// Top-level defer arguments don't escape to the heap,
		// but they do need to last until they're invoked.
		k = e.later(e.discardHole())

		// force stack allocation of defer record, unless
		// open-coded defers are used (see ssa.go)
		n.SetEsc(ir.EscNever)
	}

	call := n.Call

	// Hoist the call's init statements up into the statement itself and
	// analyze them in the enclosing context (they run at the go/defer
	// statement, not when the deferred call executes).
	init := n.PtrInit()
	init.Append(ir.TakeInit(call)...)
	e.stmts(*init)

	// If the function is already a zero argument/result function call,
	// just escape analyze it normally.
	if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
		if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
			if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
				clo.IsGoWrap = true
			}
			e.expr(k, call.X)
			return
		}
	}

	// Create a new no-argument function that we'll hand off to defer.
	fn := ir.NewClosureFunc(n.Pos(), true)
	fn.SetWrapper(true)
	fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
	fn.Body = []ir.Node{call}

	clo := fn.OClosure
	if n.Op() == ir.OGO {
		// Mark the closure as a "go" wrapper so later phases can treat
		// it specially.
		clo.IsGoWrap = true
	}

	// Analyze the wrapped call; arguments get copied into temps that
	// are captured by the wrapper closure (see rewriteArgument).
	e.callCommon(nil, call, init, fn)
	e.closures = append(e.closures, closure{e.spill(k, clo), clo})

	// Create new top level call to closure.
	n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
	ir.WithFunc(e.curfn, func() {
		typecheck.Stmt(n.Call)
	})
}
|
|
|
|
|
|
|
|
|
|
// rewriteArgument rewrites the argument *argp of the given call expression.
// fn is the static callee function, if known.
// wrapper is the go/defer wrapper function for call, if any.
//
// Two rewrites may apply: uintptr(unsafe-pointer) arguments to
// //go:uintptrkeepalive / //go:uintptrescapes functions are split so
// the underlying pointer is kept alive or escaped; and, when building
// a go/defer wrapper, arguments are copied into temps captured by the
// wrapper.
func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
	var pragma ir.PragmaFlag
	if fn != nil && fn.Func != nil {
		pragma = fn.Func.Pragma
	}

	// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
	// functions, so that ptr is kept alive and/or escaped as
	// appropriate. unsafeUintptr also reports whether it modified arg0.
	unsafeUintptr := func(arg0 ir.Node) bool {
		if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
			return false
		}

		// If the argument is really a pointer being converted to uintptr,
		// arrange for the pointer to be kept alive until the call returns,
		// by copying it into a temp and marking that temp
		// still alive when we pop the temp stack.
		if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
			return false
		}
		arg := arg0.(*ir.ConvExpr)

		if !arg.X.Type().IsUnsafePtr() {
			return false
		}

		// Create and declare a new pointer-typed temp variable.
		tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)

		if pragma&ir.UintptrEscapes != 0 {
			e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
		}

		if pragma&ir.UintptrKeepAlive != 0 {
			call := call.(*ir.CallExpr)

			// SSA implements CallExpr.KeepAlive using OpVarLive, which
			// doesn't support PAUTOHEAP variables. I tried changing it to
			// use OpKeepAlive, but that ran into issues of its own.
			// For now, the easy solution is to explicitly copy to (yet
			// another) new temporary variable.
			keep := tmp
			if keep.Class == ir.PAUTOHEAP {
				keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
			}

			keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
			call.KeepAlive = append(call.KeepAlive, keep)
		}

		return true
	}

	// visit rewrites a single (possibly nested) argument expression.
	visit := func(pos src.XPos, argp *ir.Node) {
		if unsafeUintptr(*argp) {
			return
		}

		// When wrapping for go/defer, copy the argument into a temp the
		// wrapper captures, so it's evaluated at the statement, not when
		// the wrapper runs.
		if wrapper != nil {
			e.wrapExpr(pos, argp, init, call, wrapper)
		}
	}

	// Peel away any slice lits.
	if arg := *argp; arg.Op() == ir.OSLICELIT {
		list := arg.(*ir.CompLitExpr).List
		for i := range list {
			visit(arg.Pos(), &list[i])
		}
	} else {
		visit(call.Pos(), argp)
	}
}
|
|
|
|
|
|
|
|
|
|
// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
|
|
|
|
|
// is non-nil, the variable will be captured for use within that
|
|
|
|
|
// function.
|
|
|
|
|
func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
|
|
|
|
|
tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
|
|
|
|
|
|
|
|
|
|
if wrapper != nil {
|
|
|
|
|
// Currently for "defer i.M()" if i is nil it panics at the point
|
|
|
|
|
// of defer statement, not when deferred function is called. We
|
|
|
|
|
// need to do the nil check outside of the wrapper.
|
|
|
|
|
if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
|
|
|
|
|
check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
|
|
|
|
|
init.Append(typecheck.Stmt(check))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.oldLoc(tmp).captured = true
|
|
|
|
|
|
|
|
|
|
cv := ir.NewClosureVar(pos, wrapper, tmp)
|
|
|
|
|
cv.SetType(tmp.Type())
|
|
|
|
|
tmp = typecheck.Expr(cv).(*ir.Name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*exprp = tmp
|
|
|
|
|
return tmp
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// copyExpr creates and returns a new temporary variable within fn;
|
|
|
|
|
// appends statements to init to declare and initialize it to expr;
|
|
|
|
|
// and escape analyzes the data flow if analyze is true.
|
|
|
|
|
func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
|
|
|
|
|
if ir.HasUniquePos(expr) {
|
|
|
|
|
pos = expr.Pos()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
tmp := typecheck.TempAt(pos, fn, expr.Type())
|
|
|
|
|
|
|
|
|
|
stmts := []ir.Node{
|
|
|
|
|
ir.NewDecl(pos, ir.ODCL, tmp),
|
|
|
|
|
ir.NewAssignStmt(pos, tmp, expr),
|
|
|
|
|
}
|
|
|
|
|
typecheck.Stmts(stmts)
|
|
|
|
|
init.Append(stmts...)
|
|
|
|
|
|
|
|
|
|
if analyze {
|
|
|
|
|
e.newLoc(tmp, false)
|
|
|
|
|
e.stmts(stmts)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return tmp
|
2021-06-22 20:53:14 -07:00
|
|
|
}
|
|
|
|
|
|
2021-06-22 01:26:34 -07:00
|
|
|
// tagHole returns a hole for evaluating an argument passed to param.
|
|
|
|
|
// ks should contain the holes representing where the function
|
|
|
|
|
// callee's results flows. fn is the statically-known callee function,
|
|
|
|
|
// if any.
|
|
|
|
|
func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
|
|
|
|
|
// If this is a dynamic call, we can't rely on param.Note.
|
|
|
|
|
if fn == nil {
|
|
|
|
|
return e.heapHole()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if e.inMutualBatch(fn) {
|
|
|
|
|
return e.addr(ir.AsNode(param.Nname))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Call to previously tagged function.
|
|
|
|
|
|
|
|
|
|
var tagKs []hole
|
|
|
|
|
|
|
|
|
|
esc := parseLeaks(param.Note)
|
|
|
|
|
if x := esc.Heap(); x >= 0 {
|
|
|
|
|
tagKs = append(tagKs, e.heapHole().shift(x))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ks != nil {
|
|
|
|
|
for i := 0; i < numEscResults; i++ {
|
|
|
|
|
if x := esc.Result(i); x >= 0 {
|
|
|
|
|
tagKs = append(tagKs, ks[i].shift(x))
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return e.teeHole(tagKs...)
|
|
|
|
|
}
|