// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package escape

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/src"
)

// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow.
func (e *escape) call(ks []hole, call ir.Node) {
	argument := func(k hole, arg ir.Node) {
		// TODO(mdempsky): Should be "call argument".
		e.expr(k.note(call, "call parameter"), arg)
	}

	switch call.Op() {
	default:
		ir.Dump("esc", call)
		base.Fatalf("unexpected call op: %v", call.Op())

	case ir.OCALLFUNC, ir.OCALLINTER:
		call := call.(*ir.CallExpr)
		typecheck.AssertFixedCall(call)

		// Pick out the function callee, if statically known.
		//
		// TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
		// functions (e.g., runtime builtins, method wrappers, generated
		// eq/hash functions) don't have it set. Investigate whether
		// that's a concern.
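		//
		// For example, in
		//
		//	f := g
		//	f(x)
		//
		// ir.StaticValue resolves f back to g, so the callee is
		// statically known even though call.Fun is a local variable.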
		var fn *ir.Name
		switch call.Op() {
		case ir.OCALLFUNC:
			v := ir.StaticValue(call.Fun)
			fn = ir.StaticCalleeName(v)
		}

		fntype := call.Fun.Type()
		if fn != nil {
			fntype = fn.Type()
		}

		if ks != nil && fn != nil && e.inMutualBatch(fn) {
			for i, result := range fn.Type().Results() {
				e.expr(ks[i], result.Nname.(*ir.Name))
			}
		}

		var recvArg ir.Node
		if call.Op() == ir.OCALLFUNC {
			// Evaluate callee function expression.
			calleeK := e.discardHole()
			if fn == nil { // unknown callee
				for _, k := range ks {
					if k.dst != &e.blankLoc {
						// The results flow somewhere, but we don't statically
						// know the callee function. If a closure flows here, we
						// need to conservatively assume its results might flow to
						// the heap.
						calleeK = e.calleeHole().note(call, "callee operand")
						break
					}
				}
			}
			e.expr(calleeK, call.Fun)
		} else {
			recvArg = call.Fun.(*ir.SelectorExpr).X
		}

		// argumentParam handles escape analysis of assigning a call
		// argument to its corresponding parameter.
		argumentParam := func(param *types.Field, arg ir.Node) {
			e.rewriteArgument(arg, call, fn)
			argument(e.tagHole(ks, fn, param), arg)
		}

		args := call.Args
		if recvParam := fntype.Recv(); recvParam != nil {
			if recvArg == nil {
				// Function call using method expression. Receiver argument is
				// at the front of the regular arguments list.
				recvArg, args = args[0], args[1:]
			}

			argumentParam(recvParam, recvArg)
		}

		for i, param := range fntype.Params() {
			argumentParam(param, args[i])
		}

	case ir.OINLCALL:
		call := call.(*ir.InlinedCallExpr)
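		// Analyze the statements of the inlined body, then flow each
		// inlined result variable into the corresponding hole (or
		// discard it if the caller ignores the results).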
		e.stmts(call.Body)
		for i, result := range call.ReturnVars {
			k := e.discardHole()
			if ks != nil {
				k = ks[i]
			}
			e.expr(k, result)
		}

	case ir.OAPPEND:
		call := call.(*ir.CallExpr)
		args := call.Args

		// Appendee slice may flow directly to the result, if
		// it has enough capacity. Alternatively, a new heap
		// slice might be allocated, and all slice elements
		// might flow to heap.
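		//
		// For example, after "dst = append(dst, x)", dst may still
		// share a backing array with the appendee, or it may refer to
		// a freshly heap-allocated array that the old elements were
		// copied into.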
		appendeeK := e.teeHole(ks[0], e.mutatorHole())
		if args[0].Type().Elem().HasPointers() {
			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
		}
		argument(appendeeK, args[0])

		if call.IsDDD {
			appendedK := e.discardHole()
			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
				appendedK = e.heapHole().deref(call, "appended slice...")
			}
			argument(appendedK, args[1])
		} else {
			for i := 1; i < len(args); i++ {
				argument(e.heapHole(), args[i])
			}
		}
		e.discard(call.RType)

	case ir.OCOPY:
		call := call.(*ir.BinaryExpr)
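		// copy mutates its destination, and if the source's elements
		// contain pointers, they flow into the destination's unknown
		// backing array, which is modeled conservatively as the heap.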
		argument(e.mutatorHole(), call.X)

		copiedK := e.discardHole()
		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
			copiedK = e.heapHole().deref(call, "copied slice")
		}
		argument(copiedK, call.Y)
		e.discard(call.RType)

	case ir.OPANIC:
		call := call.(*ir.UnaryExpr)
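		// A panic argument escapes to the heap: recover can retrieve
		// it later, so it must outlive the panicking frame.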
		argument(e.heapHole(), call.X)

	case ir.OCOMPLEX:
		call := call.(*ir.BinaryExpr)
		e.discard(call.X)
		e.discard(call.Y)

	case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
		call := call.(*ir.CallExpr)
		for _, arg := range call.Args {
			e.discard(arg)
		}
		e.discard(call.RType)

	case ir.OMIN, ir.OMAX:
		call := call.(*ir.CallExpr)
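		// The result of min or max is one of its operands, so each
		// argument flows to the result hole (e.g., min(x, y) on
		// strings yields one of x or y, backing data included).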
		for _, arg := range call.Args {
			argument(ks[0], arg)
		}
		e.discard(call.RType)

	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
		call := call.(*ir.UnaryExpr)
		e.discard(call.X)

	case ir.OCLEAR:
		call := call.(*ir.UnaryExpr)
		argument(e.mutatorHole(), call.X)

	case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
		call := call.(*ir.UnaryExpr)
		argument(ks[0], call.X)

	case ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING:
		call := call.(*ir.BinaryExpr)
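		// The result aliases the pointer operand X, so X flows to the
		// result hole; the integer length/offset operand carries no
		// pointers and is discarded.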
		argument(ks[0], call.X)
		e.discard(call.Y)
		e.discard(call.RType)
	}
}

// goDeferStmt analyzes a "go" or "defer" statement.
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
	k := e.heapHole()
	if n.Op() == ir.ODEFER && e.loopDepth == 1 && n.DeferAt == nil {
		// Top-level defer arguments don't escape to the heap,
		// but they do need to last until they're invoked.
		k = e.later(e.discardHole())

		// Force stack allocation of the defer record, unless
		// open-coded defers are used (see ssa.go).
		n.SetEsc(ir.EscNever)
	}

	// If the function is already a zero argument/result function call,
	// just escape analyze it normally.
	//
	// Note that the runtime is aware of this optimization for
	// "go" statements that start in reflect.makeFuncStub or
	// reflect.methodValueCall.
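	//
	// For example, for "defer f(x)" the frontend has already emitted
	//
	//	x1 := x
	//	defer func() { f(x1) }()
	//
	// so by this point n.Call must be a call with no arguments and
	// no results.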

	call, ok := n.Call.(*ir.CallExpr)
	if !ok || call.Op() != ir.OCALLFUNC {
		base.FatalfAt(n.Pos(), "expected function call: %v", n.Call)
	}
	if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() != 0 {
		base.FatalfAt(n.Pos(), "expected signature without parameters or results: %v", sig)
	}

	if clo, ok := call.Fun.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
		clo.IsGoWrap = true
	}

	e.expr(k, call.Fun)
}

// rewriteArgument rewrites the argument arg of the given call expression.
// fn is the static callee function, if known.
func (e *escape) rewriteArgument(arg ir.Node, call *ir.CallExpr, fn *ir.Name) {
	if fn == nil || fn.Func == nil {
		return
	}
	pragma := fn.Func.Pragma
	if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
		return
	}

	// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
	// functions, so that ptr is kept alive and/or escaped as
	// appropriate.
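	//
	// For example, in a call like
	//
	//	syscall.Syscall(trap, uintptr(unsafe.Pointer(p)), 0, 0)
	//
	// the object p points to must be kept alive until the call
	// returns (//go:uintptrkeepalive), or must escape to the heap
	// entirely (//go:uintptrescapes).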
	unsafeUintptr := func(arg ir.Node) {
		// If the argument is really a pointer being converted to uintptr,
		// arrange for the pointer to be kept alive until the call
		// returns, by copying it into a temp and marking that temp
		// still alive when we pop the temp stack.
		conv, ok := arg.(*ir.ConvExpr)
		if !ok || conv.Op() != ir.OCONVNOP {
			return // not a conversion
		}
		if !conv.X.Type().IsUnsafePtr() || !conv.Type().IsUintptr() {
			return // not an unsafe.Pointer->uintptr conversion
		}

		// Create and declare a new pointer-typed temp variable.
		//
		// TODO(mdempsky): This potentially violates the Go spec's order
		// of evaluations, by evaluating arg.X before any other
		// operands.
		tmp := e.copyExpr(conv.Pos(), conv.X, call.PtrInit())
		conv.X = tmp

		k := e.mutatorHole()
		if pragma&ir.UintptrEscapes != 0 {
			k = e.heapHole().note(conv, "//go:uintptrescapes")
		}
		e.flow(k, e.oldLoc(tmp))

		if pragma&ir.UintptrKeepAlive != 0 {
			tmp.SetAddrtaken(true) // ensure SSA keeps the tmp variable
			call.KeepAlive = append(call.KeepAlive, tmp)
		}
	}

	// For variadic functions, the compiler has already rewritten:
	//
	//	f(a, b, c)
	//
	// to:
	//
	//	f([]T{a, b, c}...)
	//
	// So we need to look into slice elements to handle uintptr(ptr)
	// arguments to variadic syscall-like functions correctly.
	if arg.Op() == ir.OSLICELIT {
		list := arg.(*ir.CompLitExpr).List
		for _, el := range list {
			if el.Op() == ir.OKEY {
				el = el.(*ir.KeyExpr).Value
			}
			unsafeUintptr(el)
		}
	} else {
		unsafeUintptr(arg)
	}
}

// copyExpr creates and returns a new temporary variable within the
// current function; appends statements to init to declare and
// initialize it to expr; and escape analyzes the data flow.
func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
	if ir.HasUniquePos(expr) {
		pos = expr.Pos()
	}

	tmp := typecheck.TempAt(pos, e.curfn, expr.Type())

	stmts := []ir.Node{
		ir.NewDecl(pos, ir.ODCL, tmp),
		ir.NewAssignStmt(pos, tmp, expr),
	}
	typecheck.Stmts(stmts)
	init.Append(stmts...)

	e.newLoc(tmp, true)
	e.stmts(stmts)

	return tmp
}

// tagHole returns a hole for evaluating an argument passed to param.
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
	// If this is a dynamic call, we can't rely on param.Note.
	if fn == nil {
		return e.heapHole()
	}

	if e.inMutualBatch(fn) {
		if param.Nname == nil {
			return e.discardHole()
		}
		return e.addr(param.Nname.(*ir.Name))
	}

	// Call to previously tagged function.
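	// param.Note carries the leak summary recorded for this
	// parameter when the callee itself was escape analyzed.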

	var tagKs []hole
	esc := parseLeaks(param.Note)

	if x := esc.Heap(); x >= 0 {
		tagKs = append(tagKs, e.heapHole().shift(x))
	}
	if x := esc.Mutator(); x >= 0 {
		tagKs = append(tagKs, e.mutatorHole().shift(x))
	}
	if x := esc.Callee(); x >= 0 {
		tagKs = append(tagKs, e.calleeHole().shift(x))
	}

	if ks != nil {
		for i := 0; i < numEscResults; i++ {
			if x := esc.Result(i); x >= 0 {
				tagKs = append(tagKs, ks[i].shift(x))
			}
		}
	}

	return e.teeHole(tagKs...)
}