go/src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
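//
// As a rough illustration (a sketch, not literal compiler output), a function
//
//	func F(cond bool) {
//		defer f1()
//		if cond {
//			defer f2()
//		}
//	}
//
// with open-coded defers behaves approximately like
//
//	func F(cond bool) {
//		var deferBits uint8
//		fn1 := f1
//		deferBits |= 1 // defer #0 reached
//		var fn2 func()
//		if cond {
//			fn2 = f2
//			deferBits |= 2 // defer #1 reached
//		}
//		// ... function body ...
//		// Exit: run the reached defers in reverse order.
//		if deferBits&2 != 0 {
//			fn2()
//		}
//		if deferBits&1 != 0 {
//			fn1()
//		}
//	}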

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.
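//
// For illustration (a sketch, not literal compiler output), an index
// expression s[i] is guarded by a single unsigned comparison, which catches
// both i < 0 and i >= len(s):
//
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // does not return
//	}
//	// ... use s[i], now proven in bounds ...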

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}

func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}

func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}

func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}

func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}

func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
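
// For example, dereferencing a nil pointer in user code raises SIGSEGV (or
// the platform's equivalent), which the signal handler routes through
// runtime.sigpanic to panicmem, producing the familiar runtime error:
//
//	var p *int
//	_ = *p // panic: runtime error: invalid memory address or nil pointer dereference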

// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
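
// Illustrative sketch only (the real lowering happens during compilation,
// and deferproc's "return value" is materialized by return0): a statement
// defer f() in a function F behaves roughly like
//
//	if deferproc(f) != 0 {
//		// A deferred call recovered a panic; unwind straight to F's exit.
//		return
//	}
//	// ... rest of F ...
//	deferreturn() // run F's pending defers at each return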

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}
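
// For example (an illustrative sketch), an iterator that ignores yield's
// result and keeps calling it after the loop has exited makes the next call
// into the synthesized loop body panic with rangeDoneError:
//
//	func badSeq(yield func(int) bool) {
//		yield(1)
//		yield(2) // wrong: must stop once yield returns false
//	}
//
//	for range badSeq { // loop body breaks on the first iteration
//		break
//	}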

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])
	return d.head
}
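
// As a rough sketch of the compiler rewrite (see
// ../cmd/compile/internal/rangefunc/rewrite.go for the real transformation;
// seq and f here are just illustrative names), a loop
//
//	for x := range seq {
//		defer f(x)
//	}
//
// in some function F becomes approximately
//
//	token := deferrangefunc() // called in F, so defers attach to F's frame
//	seq(func(x int) bool {    // synthesized loop-body closure
//		deferprocat(func() { f(x) }, token)
//		return true
//	})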

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false
	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two writes are to the
	// stack, so they don't need a write barrier, and furthermore are
	// to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
*(*uintptr)(unsafe.Pointer(&d.head)) = 0
*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
return0()
// No code can go here - the C return register has
// been set and must not be clobbered.
}
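// A sketch (not actual compiler output; f and deferred are placeholder
// names) of what the compiler conceptually emits for a stack-allocated
// defer:
//
//	func f() {
//		var d _defer       // frame slot reserved by the compiler
//		d.fn = deferred    // the deferred closure
//		deferprocStack(&d) // link d into gp._defer
//		// ... function body ...
//		deferreturn()
//	}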
// Each P holds a pool for defers.
// Allocate a Defer, usually using the per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
var d *_defer
mp := acquirem()
pp := mp.p.ptr()
if len(pp.deferpool) == 0 && sched.deferpool != nil {
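		// Refill the local pool to half capacity from the global
		// pool, batching the transfer so sched.deferlock is not
		// taken on every allocation.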
lock(&sched.deferlock)
for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
d := sched.deferpool
sched.deferpool = d.link
d.link = nil
pp.deferpool = append(pp.deferpool, d)
}
unlock(&sched.deferlock)
}
if n := len(pp.deferpool); n > 0 {
d = pp.deferpool[n-1]
pp.deferpool[n-1] = nil
pp.deferpool = pp.deferpool[:n-1]
}
releasem(mp)
mp, pp = nil, nil
if d == nil {
// Allocate new defer.
d = new(_defer)
}
d.heap = true
return d
}
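// A minimal usage sketch, assuming the heap-allocated path taken by
// deferproc elsewhere in this file (gp and fn are placeholder names):
//
//	d := newdefer()    // d.heap == true, not on any defer chain yet
//	d.link = gp._defer // the caller links d into the chain itself
//	gp._defer = d
//	d.fn = fn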
// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
d := gp._defer
d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
// d.link to nil. Between these two steps, d is not on any defer list, so
// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
// if we were to copy the stack, d could then contain a stale pointer.
gp._defer = d.link
d.link = nil
// After this point we can copy the stack.
if !d.heap {
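		// Stack-allocated defers live in their caller's frame and
		// are reclaimed when that frame returns; only heap-allocated
		// defers are returned to the pool.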
return
}
mp := acquirem()
pp := mp.p.ptr()
if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of the local cache to the central cache.
var first, last *_defer
for len(pp.deferpool) > cap(pp.deferpool)/2 {
n := len(pp.deferpool)
d := pp.deferpool[n-1]
pp.deferpool[n-1] = nil
pp.deferpool = pp.deferpool[:n-1]
if first == nil {
first = d
} else {
last.link = d
}
last = d
}
lock(&sched.deferlock)
last.link = sched.deferpool
sched.deferpool = first
unlock(&sched.deferlock)
}
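	// Zero the defer before pooling it so it does not keep its old
	// closure or link reachable.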
*d = _defer{}
pp.deferpool = append(pp.deferpool, d)
releasem(mp)
mp, pp = nil, nil
}
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
var p _panic
p.deferreturn = true
p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
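	// Setting p.deferreturn above tells the panic machinery that this
	// is a normal function exit, so nextDefer only yields defers for
	// the caller's frame instead of unwinding further.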
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
}
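// Conceptually (a sketch, not literal generated code), a function that
// defers compiles roughly to:
//
//	func f() { // source: func f() { defer g(); body() }
//		deferproc(g) // or deferprocStack for a stack-allocated defer
//		body()
//		deferreturn()
//	}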
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
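//
// A usage sketch (cleanup and fatal are placeholder names):
//
//	go func() {
//		defer cleanup() // still runs; recover inside it returns nil
//		if fatal {
//			runtime.Goexit()
//		}
//	}()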
func Goexit() {
// Create a panic object for Goexit, so we can recognize when it might be
// bypassed by a recover().
var p _panic
p.goexit = true
p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
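	// goexit1 finishes the job on the g0 stack and never returns.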
goexit1()
}
// Call all Error and String methods before freezing the world.
// Used when crashing due to a panic.
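// Error and String can run arbitrary code, which is not safe once the
// world is frozen, so panic values are rendered to strings up front.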
func preprintpanics(p *_panic) {
defer func() {
text := "panic while printing panic value"
switch r := recover().(type) {
case nil:
// nothing to do
case string:
throw(text + ": " + r)
default:
throw(text + ": type " + toRType(efaceOf(&r)._type).string())
}
}()
for p != nil {
switch v := p.arg.(type) {
case error:
p.arg = v.Error()
case stringer:
p.arg = v.String()
}
p = p.link
}
}
// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
if p.link != nil {
printpanics(p.link)
if !p.link.goexit {
print("\t")
}
}
if p.goexit {
return
}
print("panic: ")
printpanicval(p.arg)
if p.recovered {
print(" [recovered]")
}
print("\n")
}
// readvarintUnsafe reads a uint32 in varint format starting at fd and
// returns the uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that
// this function uses unsafe.Pointer for speed.
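//
// The encoding is the little-endian base-128 varint scheme: seven
// payload bits per byte, with the high bit set on every byte except
// the last. For example, 300 (0b1_0010_1100) encodes as 0xAC 0x02.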
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
var r uint32
var shift int
for {
b := *(*uint8)(fd)
fd = add(fd, unsafe.Sizeof(b))
if b < 128 {
return r + uint32(b)<<shift, fd
}
r += uint32(b&0x7F) << (shift & 31)
shift += 7
if shift > 28 {
panic("Bad varint")
}
}
}
// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
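//
// A minimal sketch of detecting a nil panic (assuming the default
// GODEBUG setting):
//
//	defer func() {
//		if _, ok := recover().(*PanicNilError); ok {
//			println("panic(nil) was called")
//		}
//	}()
//	panic(nil)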
type PanicNilError struct {
// This field makes PanicNilError structurally different from
// any other struct in this package, and the _ makes it different
// from any struct in other packages too.
// This prevents accidental conversions between this struct and
// some other struct sharing the same fields, as happened in
// go.dev/issue/56603.
_ [0]*PanicNilError
}
func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
var panicnil = &godebugInc{name: "panicnil"}
// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - go.undefinedlabs.com/scopeagent
// - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
if e == nil {
if debug.panicnil.Load() != 1 {
e = new(PanicNilError)
} else {
panicnil.IncNonDefault()
}
}
gp := getg()
if gp.m.curg != gp {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic on system stack")
}
if gp.m.mallocing != 0 {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic during malloc")
}
if gp.m.preemptoff != "" {
print("panic: ")
printpanicval(e)
print("\n")
print("preempt off reason: ")
print(gp.m.preemptoff)
print("\n")
throw("panic during preemptoff")
}
if gp.m.locks != 0 {
print("panic: ")
printpanicval(e)
print("\n")
throw("panic holding locks")
}
var p _panic
p.arg = e
runningPanicDefers.Add(1)
p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
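// Run the deferred calls one at a time. nextDefer walks outward
// through the stack, so the calls run in last-in-first-out order;
// if one of them recovers the panic, nextDefer switches to
// recovery and does not return here.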
for {
fn, ok := p.nextDefer()
if !ok {
break
}
fn()
}
// If we're tracing, flush the current generation to make the trace more
// readable.
//
// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
// Currently it would hang. Not handled now because it is very unlikely, and
// already unrecoverable.
if traceEnabled() {
traceAdvance(false)
}
// ran out of deferred calls - old-school panic now
// Because it is unsafe to call arbitrary user code after freezing
// the world, we call preprintpanics to invoke all necessary Error
// and String methods to prepare the panic strings before startpanic.
preprintpanics(&p)
fatalpanic(&p) // should not return
*(*int)(nil) = 0 // not reached
}
// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
gp := getg()
// Record the caller's PC and SP, so recovery can identify panics
// that have been recovered. Also, so that if p is from Goexit, we
// can restart its defer processing loop if a recovered panic tries
// to jump past it.
p.startPC = sys.GetCallerPC()
p.startSP = unsafe.Pointer(sys.GetCallerSP())
if p.deferreturn {
p.sp = sp
if s := (*savedOpenDeferState)(gp.param); s != nil {
// recovery saved some state for us, so that we can resume
// calling open-coded defers without unwinding the stack.
gp.param = nil
p.retpc = s.retpc
p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
p.slotsPtr = add(sp, s.slotsOffset)
}
return
}
p.link = gp._panic
gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))
// Initialize state machine, and find the first frame with a defer.
//
// Note: We could use startPC and startSP here, but callers will
// never have defer statements themselves. By starting at their
// caller instead, we avoid needing to unwind through an extra
// frame. It also somewhat simplifies the terminating condition for
// deferreturn.
p.lr, p.fp = pc, sp
p.nextFrame()
}
// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
gp := getg()
if !p.deferreturn {
if gp._panic != p {
throw("bad panic stack")
}
if p.recovered {
mcall(recovery) // does not return
throw("recovery failed")
}
}
// The assembler adjusts p.argp in wrapper functions that shouldn't
// be visible to recover(), so we need to restore it each iteration.
p.argp = add(p.startSP, sys.MinFrameSize)
for {
for p.deferBitsPtr != nil {
bits := *p.deferBitsPtr
// Check whether any open-coded defers are still pending.
//
// Note: We need to check this upfront (rather than after
// clearing the top bit) because it's possible that Goexit
// invokes a deferred call, and there were still more pending
// open-coded defers in the frame; but then the deferred call
// panicked and invoked the remaining defers in the frame, before
// recovering and restarting the Goexit loop.
if bits == 0 {
p.deferBitsPtr = nil
break
}
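// For example (illustrative values): with bits == 0b00010100,
// LeadingZeros8 returns 3, so i == 4; the defer stored in slot 4
// runs next, and bits is stored back as 0b00000100.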
// Find index of top bit set.
i := 7 - uintptr(sys.LeadingZeros8(bits))
// Clear bit and store it back.
bits &^= 1 << i
*p.deferBitsPtr = bits
return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
}
Recheck:
if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
if d.rangefunc {
deferconvert(d)
popDefer(gp)
goto Recheck
}
fn := d.fn
// TODO(mdempsky): Instead of having each deferproc call have
// its own "deferreturn(); return" sequence, we should just make
// them reuse the one we emit for open-coded defers.
p.retpc = d.pc
// Unlink and free.
popDefer(gp)
return fn, true
}
if !p.nextFrame() {
return nil, false
}
}
}
// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
if p.lr == 0 {
return false
}
gp := getg()
systemstack(func() {
var limit uintptr
if d := gp._defer; d != nil {
limit = d.sp
}
var u unwinder
u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
for {
if !u.valid() {
p.lr = 0
return // ok == false
}
// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
// every frame containing a defer (not just open-coded defers),
// then we can simply loop until we find the next frame where
// it's non-zero.
if u.frame.sp == limit {
break // found a frame with linked defers
}
if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
break // found a frame with open-coded defers
}
u.next()
}
p.lr = u.frame.lr
p.sp = unsafe.Pointer(u.frame.sp)
p.fp = unsafe.Pointer(u.frame.fp)
ok = true
})
return
}
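// initOpenCodedDefers checks whether fn's frame holds pending
// open-coded defers and, if so, points p at them. The
// FUNCDATA_OpenCodedDeferInfo payload is a pair of varints: the
// offset below varp of the deferBits byte, then the offset below
// varp of the slots array holding the deferred closures.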
func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
if fd == nil {
return false
}
if fn.deferreturn == 0 {
throw("missing deferreturn")
}
deferBitsOffset, fd := readvarintUnsafe(fd)
deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
if *deferBitsPtr == 0 {
return false // has open-coded defers, but none pending
}
slotsOffset, fd := readvarintUnsafe(fd)
p.retpc = fn.entry() + uintptr(fn.deferreturn)
p.deferBitsPtr = deferBitsPtr
p.slotsPtr = add(varp, -uintptr(slotsOffset))
return true
}
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
// Must be in a function running as part of a deferred call during the panic.
// Must be called from the topmost function of the call
// (the function used in the defer statement).
// p.argp is the argument pointer of that topmost deferred function call.
// Compare against argp reported by caller.
// If they match, the caller is the one who can recover.
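// For example (an illustrative sketch; helper is a hypothetical
// function that just calls recover()):
//
//	defer func() { recover() }() // argp matches; this recover works
//	defer func() { helper() }()  // recover inside helper returns nil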
gp := getg()
p := gp._panic
if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
p.recovered = true
return p.arg
}
return nil
}
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
throw(s)
}
//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
fatal(s)
}
//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
fatal(s)
}
//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
fatal(s)
}
//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
fatal(s)
}
//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
fatal(s)
}
//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
throw(s)
}
//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
fatal(s)
}
// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// NOTE: temporarily marked "go:noinline" pending investigation/fix of
// issue #67274, so as to fix longtest builders.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/cockroachdb/pebble
// - github.com/dgraph-io/ristretto
// - github.com/outcaste-io/ristretto
// - github.com/pingcap/br
// - gvisor.dev/gvisor
// - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
// Everything throw does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
print("fatal error: ")
printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
print("\n")
})
fatalthrow(throwTypeRuntime)
}
// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
// Everything fatal does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
systemstack(func() {
print("fatal error: ")
printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
print("\n")
})
fatalthrow(throwTypeUser)
printunlock()
}
// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32
// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32
// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
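//
// For example (an illustrative sketch):
//
//	go func() {
//		defer func() { recover() }() // recovers the panic below
//		defer panic("abort")         // panics while Goexit runs defers
//		runtime.Goexit()
//	}()
//
// Even though the panic is recovered, the goroutine still exits
// rather than resuming past the Goexit call.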
func recovery(gp *g) {
p := gp._panic
pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0
// Unwind the panic stack.
for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
// Don't allow jumping past a pending Goexit.
// Instead, have its _panic.start() call return again.
//
// TODO(mdempsky): In this case, Goexit will resume walking the
// stack where it left off, which means it will need to rewalk
// frames that we've already processed.
//
// There's a similar issue with nested panics, when the inner
// panic supersedes the outer panic. Again, we end up needing to
// walk the same stack frames.
//
// These are probably pretty rare occurrences in practice, and
// they don't seem any worse than the existing logic. But if we
// move the unwinding state into _panic, we could detect when we
// run into where the last panic started, and then just pick up
// where it left off instead.
//
// With how subtle defer handling is, this might not actually be
// worthwhile though.
if p.goexit {
pc, sp = p.startPC, uintptr(p.startSP)
saveOpenDeferState = false // goexit is unwinding the stack anyway
break
}
runningPanicDefers.Add(-1)
}
gp._panic = p
if p == nil { // must be done with signal
gp.sig = 0
}
if gp.param != nil {
throw("unexpected gp.param")
}
if saveOpenDeferState {
// If we're returning to deferreturn and there are more open-coded
// defers for it to call, save enough state for it to be able to
// pick up where p0 left off.
gp.param = unsafe.Pointer(&savedOpenDeferState{
retpc: p0.retpc,
// We need to save deferBitsPtr and slotsPtr too, but those are
// stack pointers. To avoid issues around heap objects pointing
// to the stack, save them as offsets from SP.
deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
slotsOffset: uintptr(p0.slotsPtr) - uintptr(p0.sp),
})
}
// TODO(mdempsky): Currently, we rely on frames containing "defer"
// to end with "CALL deferreturn; RET". This allows deferreturn to
// finish running any pending defers in the frame.
//
// But we should be able to tell whether there are still pending
// defers here. If there aren't, we can just jump directly to the
// "RET" instruction. And if there are, we don't need an actual
// "CALL deferreturn" instruction; we can simulate it with something
// like:
//
// if usesLR {
// lr = pc
// } else {
// sp -= sizeof(pc)
// *(*uintptr)(sp) = pc
// }
// pc = funcPC(deferreturn)
//
// So that we effectively tail call into deferreturn, such that it
// then returns to the simple "RET" epilogue. That would save the
// overhead of the "deferreturn" call when there aren't actually any
// pending defers left, and shrink the TEXT size of compiled
// binaries. (Admittedly, both of these are modest savings.)
// Ensure we're recovering within the appropriate stack.
if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
throw("bad recovery")
}
// Make the deferproc for this d return again,
// this time returning 1. The calling function will
// jump to the standard return epilogue.
gp.sched.sp = sp
gp.sched.pc = pc
gp.sched.lr = 0
// Restore the bp on platforms that support frame pointers.
// N.B. It's fine to not set anything for platforms that don't
// support frame pointers, since nothing consumes them.
switch {
case goarch.IsAmd64 != 0:
// on x86, fp actually points one word higher than the top of
// the frame since the return address is saved on the stack by
// the caller
gp.sched.bp = fp - 2*goarch.PtrSize
case goarch.IsArm64 != 0:
// on arm64, the architectural bp points one word higher
// than the sp. fp is totally useless to us here, because it
// only gets us to the caller's fp.
gp.sched.bp = sp - goarch.PtrSize
}
gp.sched.ret = 1
gogo(&gp.sched)
}
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
pc := sys.GetCallerPC()
sp := sys.GetCallerSP()
gp := getg()
if gp.m.throwing == throwTypeNone {
gp.m.throwing = t
}
// Switch to the system stack to avoid any stack growth, which may make
// things worse if the runtime is in a bad state.
systemstack(func() {
if isSecureMode() {
exit(2)
}
startpanic_m()
if dopanic_m(gp, pc, sp) {
// crash uses a decent amount of nosplit stack and we're already
// low on stack in throw, so crash on the system stack (unlike
// fatalpanic).
crash()
}
exit(2)
})
*(*int)(nil) = 0 // not reached
}
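// An illustrative, user-level sketch (ordinary Go, not runtime code; a
// hedged example, not part of this file's API) of how execution reaches
// fatalthrow: conditions reported via throw/fatal, such as unlocking an
// unlocked sync.Mutex, bypass gopanic entirely, so recover has no effect:
//
//	package main
//
//	import "sync"
//
//	func main() {
//		defer func() {
//			recover() // never runs: fatalthrow exits without unwinding defers
//		}()
//		var mu sync.Mutex
//		mu.Unlock() // fatal error: sync: unlock of unlocked mutex
//	}
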
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
pc := sys.GetCallerPC()
sp := sys.GetCallerSP()
gp := getg()
var docrash bool
// Switch to the system stack to avoid any stack growth, which
// may make things worse if the runtime is in a bad state.
systemstack(func() {
if startpanic_m() && msgs != nil {
// There were panic messages and startpanic_m
// says it's okay to try to print them.
// startpanic_m has set panicking, which blocks
// main from exiting, so it is now safe to
// decrement runningPanicDefers.
runningPanicDefers.Add(-1)
printpanics(msgs)
}
docrash = dopanic_m(gp, pc, sp)
})
if docrash {
// By crashing outside the above systemstack call, debuggers
// will not be confused when generating a backtrace.
// Function crash is marked nosplit to avoid stack growth.
crash()
}
systemstack(func() {
exit(2)
})
*(*int)(nil) = 0 // not reached
}
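// A minimal sketch of the path through fatalpanic, adapted from the commit
// history for this code: with the traceback mode set to "crash", an
// unrecovered panic is printed here and then escalated to crash():
//
//	package main
//
//	import "runtime/debug"
//
//	func main() {
//		debug.SetTraceback("crash") // same effect as GOTRACEBACK=crash
//		panic("panic!")             // gopanic -> fatalpanic -> crash() -> SIGABRT
//	}
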
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
gp := getg()
if mheap_.cachealloc.size == 0 { // very early
print("runtime: panic before malloc heap initialized\n")
}
// Disallow malloc during an unrecoverable panic. A panic
// could happen in a signal handler, or in a throw, or inside
// malloc itself. We want to catch if an allocation ever does
// happen (even if we're not in one of these situations).
gp.m.mallocing++
// If we're dying because of a bad lock count, set it to a
// good lock count so we don't recursively panic below.
if gp.m.locks < 0 {
gp.m.locks = 1
}
switch gp.m.dying {
case 0:
// Setting dying >0 has the side-effect of disabling this G's writebuf.
gp.m.dying = 1
panicking.Add(1)
lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 {
schedtrace(true)
}
freezetheworld()
return true
case 1:
// Something failed while panicking.
// Just print a stack trace and exit.
gp.m.dying = 2
print("panic during panic\n")
return false
case 2:
// This is a genuine bug in the runtime; we couldn't even
// print the stack trace successfully.
gp.m.dying = 3
print("stack trace unavailable\n")
exit(4)
fallthrough
default:
// Can't even print! Just exit.
exit(5)
return false // Need to return something.
}
}
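// For reference, the m.dying escalation above in one place (a reading aid,
// not a change in behavior):
//
//	dying 0 -> 1: first failure; freeze the world, print panic messages
//	dying 1 -> 2: failed while printing; "panic during panic", stacks only
//	dying 2 -> 3: failed again; "stack trace unavailable", exit(4)
//	dying >= 3:   cannot even print; exit(5)
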
var didothers bool
var deadlock mutex
// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
if gp.sig != 0 {
signame := signame(gp.sig)
if signame != "" {
print("[signal ", signame)
} else {
print("[signal ", hex(gp.sig))
}
print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
}
level, all, docrash := gotraceback()
if level > 0 {
if gp != gp.m.curg {
all = true
}
if gp != gp.m.g0 {
print("\n")
goroutineheader(gp)
traceback(pc, sp, 0, gp)
} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
print("\nruntime stack:\n")
traceback(pc, sp, 0, gp)
}
if !didothers && all {
didothers = true
tracebackothers(gp)
}
}
unlock(&paniclk)
if panicking.Add(-1) != 0 {
// Some other m is panicking too.
// Let it print what it needs to print.
// Wait forever without chewing up cpu.
// It will exit when it's done.
lock(&deadlock)
lock(&deadlock)
}
printDebugLog()
return docrash
}
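// How much dopanic_m prints is steered by gotraceback(), i.e. by the
// GOTRACEBACK environment variable (see the runtime package documentation).
// Roughly, in terms of the level/all/docrash values decoded above:
//
//	GOTRACEBACK=none    level 0: suppress goroutine stacks
//	GOTRACEBACK=single  level 1: the crashing goroutine only (the default)
//	GOTRACEBACK=all     level 1, all set: every user-created goroutine
//	GOTRACEBACK=system  level 2: runtime frames and system goroutines too
//	GOTRACEBACK=crash   level 2, docrash set: also abort for a core dump
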
// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
gp := getg()
mp := acquirem()
// Is it okay for gp to panic instead of crashing the program?
// Yes, as long as it is running Go code, not runtime code,
// and not stuck in a system call.
if gp != mp.curg {
releasem(mp)
return false
}
// N.B. mp.locks != 1 instead of 0 to account for acquirem.
if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
releasem(mp)
return false
}
status := readgstatus(gp)
if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
releasem(mp)
return false
}
if GOOS == "windows" && mp.libcallsp != 0 {
releasem(mp)
return false
}
releasem(mp)
return true
}
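// When canpanic reports true, a synchronous signal such as SIGSEGV becomes
// an ordinary, recoverable runtime error rather than a hard crash. A minimal
// user-level sketch:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		defer func() { fmt.Println("recovered:", recover()) }()
//		var p *int
//		_ = *p // SIGSEGV in user Go code -> sigpanic -> recoverable panic
//	}
//
// The same fault inside runtime code or during a system call fails the
// checks above and is thrown instead.
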
// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
if pc == 0 {
// Probably a call to a nil func. The old LR is more
// useful in the stack trace. Not pushing the frame
// will make the trace look like a call to sigpanic
// instead. (Otherwise the trace will end at sigpanic
// and we won't get to see who faulted.)
return false
}
// If we don't recognize the PC as code, but we do recognize
// the link register as code, then we assume the panic was
// caused by a call to non-code. In this case, we want to
// ignore this call to make unwinding show the context.
//
// If we're running C code, we're not going to recognize pc as a
// Go function, so just assume it's good. Otherwise, traceback
// may try to read a stale LR that looks like a Go code
// pointer and wander into the woods.
if gp.m.incgo || findfunc(pc).valid() {
// This wasn't a bad call, so use PC as sigpanic's
// return PC.
return true
}
if findfunc(lr).valid() {
// This was a bad call, but the LR is good, so use the
// LR as sigpanic's return PC.
return false
}
// Neither the PC nor the LR is good. Hopefully pushing a frame
// will work.
return true
}
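// The pc == 0 case above is what calling a nil function value produces. A
// minimal sketch; because the frame is left alone, the traceback still shows
// the faulting caller instead of ending at sigpanic:
//
//	package main
//
//	func main() {
//		var f func()
//		f() // jumps to PC 0; keeping the old return address preserves main in the trace
//	}
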
// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
f := findfunc(pc)
if !f.valid() {
return false
}
return f.funcID == abi.FuncID_abort
}
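// runtime.abort deliberately faults at a known PC (a breakpoint or undefined
// instruction, depending on the architecture). A simplified sketch of the
// consumer side in the signal-handling code, with sigpc standing in for the
// faulting PC reported by the OS:
//
//	if isAbortPC(sigpc) {
//		// Intentional hard abort: die loudly, never raise sigpanic.
//	}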