runtime: panic if cleanup function closes over cleanup pointer
This would catch problems like https://go.dev/cl/696295.

Benchmark effect with this CL plus CL 697535:

goos: linux
goarch: amd64
pkg: runtime
cpu: 12th Gen Intel(R) Core(TM) i7-1260P
                      │ /tmp/foo.1  │             /tmp/foo.2              │
                      │   sec/op    │    sec/op     vs base               │
AddCleanupAndStop-16    81.93n ± 1%   82.87n ± 1%  +1.14% (p=0.041 n=10)

For #75066

Change-Id: Ia41d0d6b88baebf6055cb7e5d42bc8210b31630f
Reviewed-on: https://go-review.googlesource.com/c/go/+/714000
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Ian Lance Taylor <iant@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
parent 06412288cf
commit eb63ef9d66
3 changed files with 68 additions and 3 deletions
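Before the diff, a minimal sketch (not part of the change) of the behavior at the API level: a cleanup that receives its state through arg can run once the object becomes unreachable, while a cleanup closure that captures ptr keeps the object reachable forever, so with this CL AddCleanup panics instead of silently registering a cleanup that can never run. The resource/object names below are invented for illustration.

package main

import (
	"fmt"
	"runtime"
)

// resource stands in for something that needs releasing (an fd, a C handle, ...).
type resource struct{ id int }

// object is the user-visible wrapper that owns the resource.
type object struct{ res *resource }

func main() {
	obj := &object{res: &resource{id: 7}}

	// Fine: the cleanup receives the *resource through arg and does not
	// reference obj, so obj can become unreachable and the cleanup can run.
	runtime.AddCleanup(obj, func(r *resource) { fmt.Println("released", r.id) }, obj.res)

	// Rejected by this change: the closure captures obj, so obj can never
	// become unreachable and the cleanup could never run. AddCleanup now
	// panics instead of silently accepting it.
	defer func() { fmt.Println("recovered:", recover()) }()
	runtime.AddCleanup(obj, func(int) { fmt.Println(obj.res.id) }, 0)
}

A similar diagnostic already existed for arg pointing back into ptr's allocation; this change extends it to pointers hidden inside the cleanup closure itself.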
@@ -71,7 +71,14 @@ import (
 // mentions it. To ensure a cleanup does not get called prematurely,
 // pass the object to the [KeepAlive] function after the last point
 // where the object must remain reachable.
+//
+//go:nocheckptr
 func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
+	// This is marked nocheckptr because checkptr doesn't understand the
+	// pointer manipulation done when looking at closure pointers.
+	// Similar code in mbitmap.go works because the functions are
+	// go:nosplit, which implies go:nocheckptr (CL 202158).
+
 	// Explicitly force ptr and cleanup to escape to the heap.
 	ptr = abi.Escape(ptr)
 	cleanup = abi.Escape(cleanup)
@@ -145,6 +152,23 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
 		}
 	}

+	// Check that the cleanup function doesn't close over the pointer.
+	cleanupFV := unsafe.Pointer(*(**funcval)(unsafe.Pointer(&cleanup)))
+	cBase, cSpan, _ := findObject(uintptr(cleanupFV), 0, 0)
+	if cBase != 0 {
+		tp := cSpan.typePointersOfUnchecked(cBase)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(cBase + cSpan.elemsize); addr == 0 {
+				break
+			}
+			ptr := *(*uintptr)(unsafe.Pointer(addr))
+			if ptr >= base && ptr < base+span.elemsize {
+				panic("runtime.AddCleanup: cleanup function closes over ptr, cleanup will never run")
+			}
+		}
+	}
+
 	// Create another G if necessary.
 	if gcCleanups.needG() {
 		gcCleanups.createGs()
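The scan above relies on how the gc toolchain represents closures: a func value is a pointer to a funcval, an object whose first word is the code pointer and whose remaining words hold the captured variables, so walking that object's pointer words (via findObject and the span's type pointers) reveals anything the closure closes over. A user-level illustration of that layout follows; it depends on internal details that may change between toolchain versions and is meant only to make the mechanism concrete.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	x := 42
	p := &x

	// p is read but never reassigned, so the closure captures a copy of p:
	// its backing object is laid out as [code pointer, captured *int].
	f := func() { *p++ }

	// A func value is a pointer to that backing object; the word after the
	// code pointer is the first captured variable. Expect this to print
	// true with the current gc compiler; the layout is not a stable API.
	fv := *(*unsafe.Pointer)(unsafe.Pointer(&f))
	captured := *(*unsafe.Pointer)(unsafe.Add(fv, unsafe.Sizeof(uintptr(0))))
	fmt.Println(captured == unsafe.Pointer(p))
}

This is why taking *(**funcval)(unsafe.Pointer(&cleanup)) and scanning its heap object is enough to notice a captured pointer back into ptr's allocation, and also why checkptr has to be disabled for this function.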
@@ -337,7 +337,7 @@ func TestCleanupLost(t *testing.T) {
 	}
 }

-func TestCleanupUnreachable(t *testing.T) {
+func TestCleanupUnreachablePointer(t *testing.T) {
 	defer func() {
 		if recover() == nil {
 			t.Error("AddCleanup failed to detect self-pointer")
@@ -354,6 +354,24 @@ func TestCleanupUnreachable(t *testing.T) {
 	}, &v.f)
 }

+func TestCleanupUnreachableClosure(t *testing.T) {
+	defer func() {
+		if recover() == nil {
+			t.Error("AddCleanup failed to detect closure pointer")
+		}
+	}()
+
+	type T struct {
+		p *byte // use *byte to avoid tiny allocator
+		f int
+	}
+	v := &T{}
+	runtime.AddCleanup(v, func(int) {
+		t.Log(v.f)
+		t.Error("cleanup ran unexpectedly")
+	}, 0)
+}
+
 // BenchmarkAddCleanupAndStop benchmarks adding and removing a cleanup
 // from the same allocation.
 //
src/runtime/testdata/testprog/checkfinalizers.go (vendored, 27 changes)
@@ -28,17 +28,40 @@ func DetectFinalizerAndCleanupLeaks() {

 	// Leak a cleanup.
 	cLeak := new(T)
+
+	// Use an extra closure to avoid the simple
+	// checking done by AddCleanup.
+	var closeOverCLeak func(int)
+	closeOverCLeak = func(x int) {
+		// Use recursion to avoid inlining.
+		if x <= 0 {
+			**cLeak = x
+		} else {
+			closeOverCLeak(x - 1)
+		}
+	}
+
 	runtime.AddCleanup(cLeak, func(x int) {
-		**cLeak = x
+		closeOverCLeak(x)
 	}, int(0))

 	// Have a regular cleanup to make sure it doesn't trip the detector.
 	cNoLeak := new(T)
 	runtime.AddCleanup(cNoLeak, func(_ int) {}, int(0))

+	// Like closeOverCLeak.
+	var closeOverCNoLeak func(int)
+	closeOverCNoLeak = func(x int) {
+		if x <= 0 {
+			**cNoLeak = x
+		} else {
+			closeOverCNoLeak(x - 1)
+		}
+	}
+
 	// Add a cleanup that only temporarily leaks cNoLeak.
 	runtime.AddCleanup(cNoLeak, func(x int) {
-		**cNoLeak = x
+		closeOverCNoLeak(x)
 	}, int(0)).Stop()

 	if !asan.Enabled && !race.Enabled {
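To keep exercising the leak detector now that a direct capture panics, the testdata above routes the pointer through a second closure. A sketch (identifiers invented) of why that sidesteps the new check: AddCleanup scans only the cleanup closure's own backing object, so a pointer held by a helper closure that the cleanup merely calls sits one indirection away and is not seen, yet it still keeps the object reachable, which is exactly the leak this checkfinalizers test program is built to provoke.

package main

import "runtime"

type T struct{ n int }

func main() {
	v := &T{}

	// helper captures v. Recursion keeps the compiler from inlining the
	// helper body (and with it the captured v) into the cleanup closure.
	var helper func(int)
	helper = func(x int) {
		if x <= 0 {
			v.n = x
		} else {
			helper(x - 1)
		}
	}

	// Accepted by AddCleanup: the cleanup closure captures only helper, not
	// a pointer into v's allocation, so the one-level scan finds nothing.
	// v nonetheless stays reachable through the closure chain, so this
	// cleanup can never run; that is the kind of leak the detector reports.
	runtime.AddCleanup(v, func(x int) { helper(x) }, 0)
}

The recursion mirrors the testdata's trick: if the helper were inlined into the cleanup closure, the captured pointer would move into the cleanup closure's own object and the new check would fire after all.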