mirror of https://github.com/golang/go.git
runtime,net/http/pprof: goroutine leak detection by using the garbage collector
Proposal #74609
Change-Id: I97a754b128aac1bc5b7b9ab607fcd5bb390058c8
GitHub-Last-Rev: 60f2a192ba
GitHub-Pull-Request: golang/go#74622
Reviewed-on: https://go-review.googlesource.com/c/go/+/688335
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: thepudds <thepudds1460@gmail.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
Parent: 84db201ae1
Commit: 8c68a1c1ab
95 changed files with 10766 additions and 89 deletions
@@ -2450,6 +2450,9 @@ var blockedLinknames = map[string][]string{
 	"sync_test.runtime_blockUntilEmptyCleanupQueue": {"sync_test"},
 	"time.runtimeIsBubbled":                         {"time"},
 	"unique.runtime_blockUntilEmptyCleanupQueue":    {"unique"},
+	// Experimental features
+	"runtime.goroutineLeakGC":    {"runtime/pprof"},
+	"runtime.goroutineleakcount": {"runtime/pprof"},
 	// Others
 	"net.newWindowsFile":                   {"net"},               // pushed from os
 	"testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing
8	src/internal/goexperiment/exp_goroutineleakprofile_off.go	Normal file
@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.

//go:build !goexperiment.goroutineleakprofile

package goexperiment

const GoroutineLeakProfile = false
const GoroutineLeakProfileInt = 0
8	src/internal/goexperiment/exp_goroutineleakprofile_on.go	Normal file
@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.

//go:build goexperiment.goroutineleakprofile

package goexperiment

const GoroutineLeakProfile = true
const GoroutineLeakProfileInt = 1
@@ -118,4 +118,7 @@ type Flags struct {
 	// SizeSpecializedMalloc enables malloc implementations that are specialized per size class.
 	SizeSpecializedMalloc bool
+
+	// GoroutineLeakProfile enables the collection of goroutine leak profiles.
+	GoroutineLeakProfile bool
 }
@@ -77,6 +77,7 @@ import (
 	"fmt"
 	"html"
 	"internal/godebug"
+	"internal/goexperiment"
 	"internal/profile"
 	"io"
 	"log"
@@ -353,6 +354,7 @@ func collectProfile(p *pprof.Profile) (*profile.Profile, error) {
 var profileSupportsDelta = map[handler]bool{
 	"allocs":        true,
 	"block":         true,
+	"goroutineleak": true,
 	"goroutine":     true,
 	"heap":          true,
 	"mutex":         true,
@@ -372,6 +374,12 @@ var profileDescriptions = map[string]string{
 	"trace": "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.",
 }

+func init() {
+	if goexperiment.GoroutineLeakProfile {
+		profileDescriptions["goroutineleak"] = "Stack traces of all leaked goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic."
+	}
+}
+
 type profileEntry struct {
 	Name string
 	Href string
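For readers who want to poke at the new handler: a minimal server sketch, not part of the CL. The URL path follows from the handler name registered above ("goroutineleak"), and the binary must be built with GOEXPERIMENT=goroutineleakprofile for the profile to be registered at all.

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ handlers, including goroutineleak
)

func main() {
	// Visit http://localhost:6060/debug/pprof/goroutineleak?debug=2 to get
	// leaked-goroutine stacks in the unrecovered-panic format described above.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}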
@@ -263,11 +263,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 	}
 	// No stack splits between assigning elem and enqueuing mysg
 	// on gp.waiting where copystack can find it.
-	mysg.elem = ep
+	mysg.elem.set(ep)
 	mysg.waitlink = nil
 	mysg.g = gp
 	mysg.isSelect = false
-	mysg.c = c
+	mysg.c.set(c)
 	gp.waiting = mysg
 	gp.param = nil
 	c.sendq.enqueue(mysg)
@@ -298,7 +298,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 	if mysg.releasetime > 0 {
 		blockevent(mysg.releasetime-t0, 2)
 	}
-	mysg.c = nil
+	mysg.c.set(nil)
 	releaseSudog(mysg)
 	if closed {
 		if c.closed == 0 {
@@ -336,9 +336,9 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
 			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
 		}
 	}
-	if sg.elem != nil {
+	if sg.elem.get() != nil {
 		sendDirect(c.elemtype, sg, ep)
-		sg.elem = nil
+		sg.elem.set(nil)
 	}
 	gp := sg.g
 	unlockf()
@@ -395,7 +395,7 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
 	// Once we read sg.elem out of sg, it will no longer
 	// be updated if the destination's stack gets copied (shrunk).
 	// So make sure that no preemption points can happen between read & use.
-	dst := sg.elem
+	dst := sg.elem.get()
 	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
 	// No need for cgo write barrier checks because dst is always
 	// Go memory.
@@ -406,7 +406,7 @@ func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
 	// dst is on our stack or the heap, src is on another stack.
 	// The channel is locked, so src will not move during this
 	// operation.
-	src := sg.elem
+	src := sg.elem.get()
 	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
 	memmove(dst, src, t.Size_)
 }
@@ -441,9 +441,9 @@ func closechan(c *hchan) {
 		if sg == nil {
 			break
 		}
-		if sg.elem != nil {
-			typedmemclr(c.elemtype, sg.elem)
-			sg.elem = nil
+		if sg.elem.get() != nil {
+			typedmemclr(c.elemtype, sg.elem.get())
+			sg.elem.set(nil)
 		}
 		if sg.releasetime != 0 {
 			sg.releasetime = cputicks()
@@ -463,7 +463,7 @@ func closechan(c *hchan) {
 		if sg == nil {
 			break
 		}
-		sg.elem = nil
+		sg.elem.set(nil)
 		if sg.releasetime != 0 {
 			sg.releasetime = cputicks()
 		}
@@ -642,13 +642,13 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
 	}
 	// No stack splits between assigning elem and enqueuing mysg
 	// on gp.waiting where copystack can find it.
-	mysg.elem = ep
+	mysg.elem.set(ep)
 	mysg.waitlink = nil
 	gp.waiting = mysg

 	mysg.g = gp
 	mysg.isSelect = false
-	mysg.c = c
+	mysg.c.set(c)
 	gp.param = nil
 	c.recvq.enqueue(mysg)
 	if c.timer != nil {
@@ -680,7 +680,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
 	}
 	success := mysg.success
 	gp.param = nil
-	mysg.c = nil
+	mysg.c.set(nil)
 	releaseSudog(mysg)
 	return true, success
 }
@@ -727,14 +727,14 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
 			typedmemmove(c.elemtype, ep, qp)
 		}
 		// copy data from sender to queue
-		typedmemmove(c.elemtype, qp, sg.elem)
+		typedmemmove(c.elemtype, qp, sg.elem.get())
 		c.recvx++
 		if c.recvx == c.dataqsiz {
 			c.recvx = 0
 		}
 		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
 	}
-	sg.elem = nil
+	sg.elem.set(nil)
 	gp := sg.g
 	unlockf()
 	gp.param = unsafe.Pointer(sg)
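A note on the mechanical rewrites above: `mysg.elem = ep` becoming `mysg.elem.set(ep)` (and likewise for `mysg.c`) indicates that the sudog's elem and c fields are now wrapper types rather than raw pointers. Judging from setSyncObjectsUntraceable/gcRestoreSyncObjects later in this CL, the wrapper lets leak detection temporarily hide these pointers from the marker, so that a channel or sync primitive only counts as reachable if something other than its own blocked waiters can still reach it. A rough standalone model of such a wrapper — the names and tagging scheme here are hypothetical, not the runtime's actual implementation:

package sketch

import "unsafe"

// hideablePtr models a pointer field that a garbage collector can be told
// to ignore. In this model the tag bit only records the hidden/visible
// state; the real runtime additionally coordinates with the GC's pointer
// scanning so hidden fields are genuinely not traced.
type hideablePtr struct {
	bits uintptr
}

const hiddenTag uintptr = 1 // low bit marks a hidden pointer

func (h *hideablePtr) set(p unsafe.Pointer) { h.bits = uintptr(p) }

func (h *hideablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(h.bits &^ hiddenTag) // strip the tag on read
}

func (h *hideablePtr) setUntraceable() { h.bits |= hiddenTag }
func (h *hideablePtr) setTraceable()   { h.bits &^= hiddenTag }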
@@ -186,6 +186,22 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) {
 	t.Logf("running %v", cmd)
 	cmd.Dir = "testdata/" + binary
 	cmd = testenv.CleanCmdEnv(cmd)
+
+	// If tests need any experimental flags, add them here.
+	//
+	// TODO(vsaioc): Remove `goroutineleakprofile` once the feature is no longer experimental.
+	edited := false
+	for i := range cmd.Env {
+		e := cmd.Env[i]
+		if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok {
+			cmd.Env[i] = "GOEXPERIMENT=" + vars + ",goroutineleakprofile"
+			edited, _ = true, vars
+		}
+	}
+	if !edited {
+		cmd.Env = append(cmd.Env, "GOEXPERIMENT=goroutineleakprofile")
+	}
+
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
595	src/runtime/goroutineleakprofile_test.go	Normal file
@@ -0,0 +1,595 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"regexp"
	"strings"
	"testing"
)

func TestGoroutineLeakProfile(t *testing.T) {
	// Goroutine leak test case.
	//
	// Test cases can be configured with the test name, the name of the entry point function,
	// a set of expected leaks identified by regular expressions, and the number of times
	// the test should be repeated.
	//
	// Repeated runs reduce flakiness in some tests.
	type testCase struct {
		name          string
		simple        bool
		repetitions   int
		expectedLeaks map[*regexp.Regexp]bool

		// flakyLeaks are goroutine leaks that are too flaky to be reliably detected.
		// Still, they might pop up every once in a while. The test will pass regardless
		// of whether they occur or not, as they are not unexpected.
		//
		// Note that all flaky leaks are true positives, i.e. real goroutine leaks,
		// and it is only their detection that is unreliable due to scheduling
		// non-determinism.
		flakyLeaks map[*regexp.Regexp]struct{}
	}

	// makeAnyTest is a short-hand for creating test cases.
	// Each of the leaks in the list is identified by a regular expression.
	// If a leak is flaky, it is added to the flakyLeaks map.
	makeAnyTest := func(name string, flaky bool, repetitions int, leaks ...string) testCase {
		tc := testCase{
			name:          name,
			expectedLeaks: make(map[*regexp.Regexp]bool, len(leaks)),
			flakyLeaks:    make(map[*regexp.Regexp]struct{}, len(leaks)),
			// Make sure the test is repeated at least once.
			repetitions: repetitions | 1,
		}

		for _, leak := range leaks {
			if !flaky {
				tc.expectedLeaks[regexp.MustCompile(leak)] = false
			} else {
				tc.flakyLeaks[regexp.MustCompile(leak)] = struct{}{}
			}
		}

		return tc
	}

	// makeTest is a short-hand for creating non-flaky test cases.
	makeTest := func(name string, leaks ...string) testCase {
		tcase := makeAnyTest(name, false, 2, leaks...)
		tcase.simple = true
		return tcase
	}

	// makeFlakyTest is a short-hand for creating flaky test cases.
	makeFlakyTest := func(name string, leaks ...string) testCase {
		if testing.Short() {
			return makeAnyTest(name, true, 2, leaks...)
		}
		return makeAnyTest(name, true, 10, leaks...)
	}

	goroutineHeader := regexp.MustCompile(`goroutine \d+ \[`)

	// extractLeaks takes the output of a test and splits it into a
	// list of strings denoting goroutine leaks.
	//
	// If the input is:
	//
	//	goroutine 1 [wait reason (leaked)]:
	//	main.leaked()
	//		./testdata/testgoroutineleakprofile/foo.go:37 +0x100
	//	created by main.main()
	//		./testdata/testgoroutineleakprofile/main.go:10 +0x20
	//
	//	goroutine 2 [wait reason (leaked)]:
	//	main.leaked2()
	//		./testdata/testgoroutineleakprofile/foo.go:37 +0x100
	//	created by main.main()
	//		./testdata/testgoroutineleakprofile/main.go:10 +0x20
	//
	// the output is (as a list of strings):
	//
	//	leaked() [wait reason]
	//	leaked2() [wait reason]
	extractLeaks := func(output string) []string {
		stacks := strings.Split(output, "\n\ngoroutine")
		var leaks []string
		for _, stack := range stacks {
			lines := strings.Split(stack, "\n")
			if len(lines) < 5 {
				// Expecting at least the following lines (where n = len(lines)-1):
				//
				//	[0]   goroutine n [wait reason (leaked)]
				//	...
				//	[n-3] bottom.leak.frame(...)
				//	[n-2] ./bottom/leak/frame/source.go:line
				//	[n-1] created by go.instruction()
				//	[n]   ./go/instruction/source.go:line
				continue
			}

			if !strings.Contains(lines[0], "(leaked)") {
				// Ignore non-leaked goroutines.
				continue
			}

			// Get the wait reason from the goroutine header.
			header := lines[0]
			waitReason := goroutineHeader.ReplaceAllString(header, "[")
			waitReason = strings.ReplaceAll(waitReason, " (leaked)", "")

			// Get the function name from the stack trace (should be two lines above `created by`).
			var funcName string
			for i := len(lines) - 1; i >= 0; i-- {
				if strings.Contains(lines[i], "created by") {
					funcName = strings.TrimPrefix(lines[i-2], "main.")
					break
				}
			}
			if funcName == "" {
				t.Fatalf("failed to extract function name from stack trace: %s", lines)
			}

			leaks = append(leaks, funcName+" "+waitReason)
		}
		return leaks
	}
	// Micro tests involve very simple leaks for each type of concurrency primitive operation.
	microTests := []testCase{
		makeTest("NilRecv",
			`NilRecv\.func1\(.* \[chan receive \(nil chan\)\]`,
		),
		makeTest("NilSend",
			`NilSend\.func1\(.* \[chan send \(nil chan\)\]`,
		),
		makeTest("SelectNoCases",
			`SelectNoCases\.func1\(.* \[select \(no cases\)\]`,
		),
		makeTest("ChanRecv",
			`ChanRecv\.func1\(.* \[chan receive\]`,
		),
		makeTest("ChanSend",
			`ChanSend\.func1\(.* \[chan send\]`,
		),
		makeTest("Select",
			`Select\.func1\(.* \[select\]`,
		),
		makeTest("WaitGroup",
			`WaitGroup\.func1\(.* \[sync\.WaitGroup\.Wait\]`,
		),
		makeTest("MutexStack",
			`MutexStack\.func1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("MutexHeap",
			`MutexHeap\.func1.1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Cond",
			`Cond\.func1\(.* \[sync\.Cond\.Wait\]`,
		),
		makeTest("RWMutexRLock",
			`RWMutexRLock\.func1\(.* \[sync\.RWMutex\.RLock\]`,
		),
		makeTest("RWMutexLock",
			`RWMutexLock\.func1\(.* \[sync\.(RW)?Mutex\.Lock\]`,
		),
		makeTest("Mixed",
			`Mixed\.func1\(.* \[sync\.WaitGroup\.Wait\]`,
			`Mixed\.func1.1\(.* \[chan send\]`,
		),
		makeTest("NoLeakGlobal"),
	}

	// Stress tests are flaky and we do not strictly care about their output.
	// They are only intended to stress the goroutine leak detector and profiling
	// infrastructure in interesting ways.
	stressTestCases := []testCase{
		makeFlakyTest("SpawnGC",
			`spawnGC.func1\(.* \[chan receive\]`,
		),
		makeTest("DaisyChain"),
	}

	// Common goroutine leak patterns.
	// Extracted from "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach",
	// doi:10.1109/CGO57630.2024.10444835.
	patternTestCases := []testCase{
		makeTest("NoCloseRange",
			`noCloseRange\(.* \[chan send\]`,
			`noCloseRange\.func1\(.* \[chan receive\]`,
		),
		makeTest("MethodContractViolation",
			`worker\.Start\.func1\(.* \[select\]`,
		),
		makeTest("DoubleSend",
			`DoubleSend\.func3\(.* \[chan send\]`,
		),
		makeTest("EarlyReturn",
			`earlyReturn\.func1\(.* \[chan send\]`,
		),
		makeTest("NCastLeak",
			`nCastLeak\.func1\(.* \[chan send\]`,
			`NCastLeak\.func2\(.* \[chan receive\]`,
		),
		makeTest("Timeout",
			// (vsaioc): Timeout is *theoretically* flaky, but the
			// pseudo-random choice for select case branches makes it
			// practically impossible for it to fail.
			`timeout\.func1\(.* \[chan send\]`,
		),
	}
	// GoKer tests from "GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs".
	// Refer to testdata/testgoroutineleakprofile/goker/README.md.
	//
	// This list is curated for tests that are not excessively flaky.
	// Some tests are also excluded because they are redundant.
	//
	// TODO(vsaioc): Some of these might be removable (their patterns may overlap).
	gokerTestCases := []testCase{
		makeFlakyTest("Cockroach584",
			`Cockroach584\.func2\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach1055",
			`Cockroach1055\.func2\(.* \[chan receive\]`,
			`Cockroach1055\.func2\.2\(.* \[sync\.WaitGroup\.Wait\]`,
			`Cockroach1055\.func2\.1\(.* \[chan receive\]`,
			`Cockroach1055\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach1462",
			`\(\*Stopper_cockroach1462\)\.RunWorker\.func1\(.* \[chan send\]`,
			`Cockroach1462\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
		),
		makeFlakyTest("Cockroach2448",
			`\(\*Store_cockroach2448\)\.processRaft\(.* \[select\]`,
			`\(\*state_cockroach2448\)\.start\(.* \[select\]`,
		),
		makeFlakyTest("Cockroach3710",
			`\(\*Store_cockroach3710\)\.ForceRaftLogScanAndProcess\(.* \[sync\.RWMutex\.RLock\]`,
			`\(\*Store_cockroach3710\)\.processRaft\.func1\(.* \[sync\.RWMutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach6181",
			`testRangeCacheCoalescedRequests_cockroach6181\(.* \[sync\.WaitGroup\.Wait\]`,
			`testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.(RW)?Mutex\.Lock\]`,
			`testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.RWMutex\.RLock\]`,
		),
		makeTest("Cockroach7504",
			`Cockroach7504\.func2\.1.* \[sync\.Mutex\.Lock\]`,
			`Cockroach7504\.func2\.2.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach9935",
			`\(\*loggingT_cockroach9935\)\.outputLogEntry\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach10214",
			`\(*Store_cockroach10214\)\.sendQueuedHeartbeats\(.* \[sync\.Mutex\.Lock\]`,
			`\(*Replica_cockroach10214\)\.tick\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach10790",
			`\(\*Replica_cockroach10790\)\.beginCmds\.func1\(.* \[chan receive\]`,
		),
		makeTest("Cockroach13197",
			`\(\*Tx_cockroach13197\)\.awaitDone\(.* \[chan receive\]`,
		),
		makeTest("Cockroach13755",
			`\(\*Rows_cockroach13755\)\.awaitDone\(.* \[chan receive\]`,
		),
		makeFlakyTest("Cockroach16167",
			`Cockroach16167\.func2\(.* \[sync\.RWMutex\.RLock\]`,
			`\(\*Executor_cockroach16167\)\.Start\(.* \[sync\.RWMutex\.Lock\]`,
		),
		makeFlakyTest("Cockroach18101",
			`restore_cockroach18101\.func1\(.* \[chan send\]`,
		),
		makeTest("Cockroach24808",
			`Cockroach24808\.func2\(.* \[chan send\]`,
		),
		makeTest("Cockroach25456",
			`Cockroach25456\.func2\(.* \[chan receive\]`,
		),
		makeTest("Cockroach35073",
			`Cockroach35073\.func2.1\(.* \[chan send\]`,
			`Cockroach35073\.func2\(.* \[chan send\]`,
		),
		makeTest("Cockroach35931",
			`Cockroach35931\.func2\(.* \[chan send\]`,
		),
		makeTest("Etcd5509",
			`Etcd5509\.func2\(.* \[sync\.RWMutex\.Lock\]`,
		),
		makeTest("Etcd6708",
			`Etcd6708\.func2\(.* \[sync\.RWMutex\.RLock\]`,
		),
		makeFlakyTest("Etcd6857",
			`\(\*node_etcd6857\)\.Status\(.* \[chan send\]`,
		),
		makeFlakyTest("Etcd6873",
			`\(\*watchBroadcasts_etcd6873\)\.stop\(.* \[chan receive\]`,
			`newWatchBroadcasts_etcd6873\.func1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Etcd7492",
			`Etcd7492\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
			`Etcd7492\.func2\.1\(.* \[chan send\]`,
			`\(\*simpleTokenTTLKeeper_etcd7492\)\.run\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Etcd7902",
			`doRounds_etcd7902\.func1\(.* \[chan receive\]`,
			`doRounds_etcd7902\.func1\(.* \[sync\.Mutex\.Lock\]`,
			`runElectionFunc_etcd7902\(.* \[sync\.WaitGroup\.Wait\]`,
		),
		makeTest("Etcd10492",
			`Etcd10492\.func2\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Grpc660",
			`\(\*benchmarkClient_grpc660\)\.doCloseLoopUnary\.func1\(.* \[chan send\]`,
		),
		makeFlakyTest("Grpc795",
			`\(\*Server_grpc795\)\.Serve\(.* \[sync\.Mutex\.Lock\]`,
			`testServerGracefulStopIdempotent_grpc795\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Grpc862",
			`DialContext_grpc862\.func2\(.* \[chan receive\]`),
		makeTest("Grpc1275",
			`testInflightStreamClosing_grpc1275\.func1\(.* \[chan receive\]`),
		makeTest("Grpc1424",
			`DialContext_grpc1424\.func1\(.* \[chan receive\]`),
		makeFlakyTest("Grpc1460",
			`\(\*http2Client_grpc1460\)\.keepalive\(.* \[chan receive\]`,
			`\(\*http2Client_grpc1460\)\.NewStream\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Grpc3017",
			// grpc/3017 involves a goroutine leak that also simultaneously engages many GC assists.
			`Grpc3017\.func2\(.* \[chan receive\]`,
			`Grpc3017\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*lbCacheClientConn_grpc3017\)\.RemoveSubConn\.func1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Hugo3251",
			`Hugo3251\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
			`Hugo3251\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
			`Hugo3251\.func2\.1\(.* \[sync\.RWMutex\.RLock\]`,
		),
		makeFlakyTest("Hugo5379",
			`\(\*Page_hugo5379\)\.initContent\.func1\.1\(.* \[sync\.Mutex\.Lock\]`,
			`pageRenderer_hugo5379\(.* \[sync\.Mutex\.Lock\]`,
			`Hugo5379\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
		),
		makeFlakyTest("Istio16224",
			`Istio16224\.func2\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*controller_istio16224\)\.Run\(.* \[chan send\]`,
			`\(\*controller_istio16224\)\.Run\(.* \[chan receive\]`,
		),
		makeFlakyTest("Istio17860",
			`\(\*agent_istio17860\)\.runWait\(.* \[chan send\]`,
		),
		makeFlakyTest("Istio18454",
			`\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan receive\]`,
			`\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan send\]`,
		),
		// NOTE(vsaioc):
		// Kubernetes/1321 is excluded due to a race condition in the original program
		// that may, in extremely rare cases, lead to nil pointer dereference crashes.
		// (Reproducible even with the regular GC.) Only kept here for posterity.
		//
		// makeTest(testCase{name: "Kubernetes1321"},
		//	`NewMux_kubernetes1321\.gowrap1\(.* \[chan send\]`,
		//	`testMuxWatcherClose_kubernetes1321\(.* \[sync\.Mutex\.Lock\]`),
		makeTest("Kubernetes5316",
			`finishRequest_kubernetes5316\.func1\(.* \[chan send\]`,
		),
		makeFlakyTest("Kubernetes6632",
			`\(\*idleAwareFramer_kubernetes6632\)\.monitor\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*idleAwareFramer_kubernetes6632\)\.WriteFrame\(.* \[chan send\]`,
		),
		makeFlakyTest("Kubernetes10182",
			`\(\*statusManager_kubernetes10182\)\.Start\.func1\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*statusManager_kubernetes10182\)\.SetPodStatus\(.* \[chan send\]`,
		),
		makeFlakyTest("Kubernetes11298",
			`After_kubernetes11298\.func1\(.* \[chan receive\]`,
			`After_kubernetes11298\.func1\(.* \[sync\.Cond\.Wait\]`,
			`Kubernetes11298\.func2\(.* \[chan receive\]`,
		),
		makeFlakyTest("Kubernetes13135",
			`Util_kubernetes13135\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*WatchCache_kubernetes13135\)\.Add\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Kubernetes25331",
			`\(\*watchChan_kubernetes25331\)\.run\(.* \[chan send\]`,
		),
		makeFlakyTest("Kubernetes26980",
			`Kubernetes26980\.func2\(.* \[chan receive\]`,
			`Kubernetes26980\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*processorListener_kubernetes26980\)\.pop\(.* \[chan receive\]`,
		),
		makeFlakyTest("Kubernetes30872",
			`\(\*DelayingDeliverer_kubernetes30872\)\.StartWithHandler\.func1\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*Controller_kubernetes30872\)\.Run\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*NamespaceController_kubernetes30872\)\.Run\.func1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Kubernetes38669",
			`\(\*cacheWatcher_kubernetes38669\)\.process\(.* \[chan send\]`,
		),
		makeFlakyTest("Kubernetes58107",
			`\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.Cond\.Wait\]`,
			`\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.RWMutex\.RLock\]`,
			`\(\*ResourceQuotaController_kubernetes58107\)\.Sync\(.* \[sync\.RWMutex\.Lock\]`,
		),
		makeFlakyTest("Kubernetes62464",
			`\(\*manager_kubernetes62464\)\.reconcileState\(.* \[sync\.RWMutex\.RLock\]`,
			`\(\*staticPolicy_kubernetes62464\)\.RemoveContainer\(.* \[sync\.(RW)?Mutex\.Lock\]`,
		),
		makeFlakyTest("Kubernetes70277",
			`Kubernetes70277\.func2\(.* \[chan receive\]`,
		),
		makeFlakyTest("Moby4951",
			`\(\*DeviceSet_moby4951\)\.DeleteDevice\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Moby7559",
			`\(\*UDPProxy_moby7559\)\.Run\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeTest("Moby17176",
			`testDevmapperLockReleasedDeviceDeletion_moby17176\.func1\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Moby21233",
			`\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[chan send\]`,
			`\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[select\]`,
			`testTransfer_moby21233\(.* \[chan receive\]`,
		),
		makeTest("Moby25348",
			`\(\*Manager_moby25348\)\.init\(.* \[sync\.WaitGroup\.Wait\]`,
		),
		makeFlakyTest("Moby27782",
			`\(\*JSONFileLogger_moby27782\)\.readLogs\(.* \[sync\.Cond\.Wait\]`,
			`\(\*Watcher_moby27782\)\.readEvents\(.* \[select\]`,
		),
		makeFlakyTest("Moby28462",
			`monitor_moby28462\(.* \[sync\.Mutex\.Lock\]`,
			`\(\*Daemon_moby28462\)\.StateChanged\(.* \[chan send\]`,
		),
		makeTest("Moby30408",
			`Moby30408\.func2\(.* \[chan receive\]`,
			`testActive_moby30408\.func1\(.* \[sync\.Cond\.Wait\]`,
		),
		makeFlakyTest("Moby33781",
			`monitor_moby33781\.func1\(.* \[chan send\]`,
		),
		makeFlakyTest("Moby36114",
			`\(\*serviceVM_moby36114\)\.hotAddVHDsAtStart\(.* \[sync\.Mutex\.Lock\]`,
		),
		makeFlakyTest("Serving2137",
			`\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[chan send\]`,
			`\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[sync\.Mutex\.Lock\]`,
			`Serving2137\.func2\(.* \[chan receive\]`,
		),
		makeTest("Syncthing4829",
			`Syncthing4829\.func2\(.* \[sync\.RWMutex\.RLock\]`,
		),
		makeTest("Syncthing5795",
			`\(\*rawConnection_syncthing5795\)\.dispatcherLoop\(.* \[chan receive\]`,
			`Syncthing5795\.func2.* \[chan receive\]`,
		),
	}
	// Combine all test cases into a single list.
	testCases := append(microTests, stressTestCases...)
	testCases = append(testCases, patternTestCases...)

	// Test cases must not panic or cause fatal exceptions.
	failStates := regexp.MustCompile(`fatal|panic`)

	testApp := func(exepath string, testCases []testCase) {

		// Build the test program once.
		exe, err := buildTestProg(t, exepath)
		if err != nil {
			t.Fatal(fmt.Sprintf("building testgoroutineleakprofile failed: %v", err))
		}

		for _, tcase := range testCases {
			t.Run(tcase.name, func(t *testing.T) {
				t.Parallel()

				cmdEnv := []string{
					"GODEBUG=asyncpreemptoff=1",
					"GOEXPERIMENT=greenteagc,goroutineleakprofile",
				}

				if tcase.simple {
					// If the test is simple, set GOMAXPROCS=1 in order to better
					// control the behavior of the scheduler.
					cmdEnv = append(cmdEnv, "GOMAXPROCS=1")
				}

				var output string
				for i := 0; i < tcase.repetitions; i++ {
					// Run the program for one repetition and collect its output trace.
					runOutput := runBuiltTestProg(t, exe, tcase.name, cmdEnv...)
					if len(runOutput) == 0 {
						t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name)
					}

					// Zero-tolerance policy for fatal exceptions or panics.
					if failStates.MatchString(runOutput) {
						t.Errorf("unexpected fatal exception or panic!\noutput:\n%s\n\n", runOutput)
					}

					output += runOutput + "\n\n"
				}

				// Extract all the goroutine leaks.
				foundLeaks := extractLeaks(output)

				// If the test case was not expected to produce leaks, but some were reported,
				// stop the test immediately. Zero-tolerance policy for false positives.
				if len(tcase.expectedLeaks)+len(tcase.flakyLeaks) == 0 && len(foundLeaks) > 0 {
					t.Errorf("output:\n%s\n\ngoroutine leaks detected in case with no leaks", output)
				}

				unexpectedLeaks := make([]string, 0, len(foundLeaks))

				// Parse every leak and check whether it is expected (maybe as a flaky leak).
			LEAKS:
				for _, leak := range foundLeaks {
					// Check if the leak is expected.
					// If it is, check whether it has been encountered before.
					var foundNew bool
					var leakPattern *regexp.Regexp

					for expectedLeak, ok := range tcase.expectedLeaks {
						if expectedLeak.MatchString(leak) {
							if !ok {
								foundNew = true
							}

							leakPattern = expectedLeak
							break
						}
					}

					if foundNew {
						// Only bother writing if we found a new leak.
						tcase.expectedLeaks[leakPattern] = true
					}

					if leakPattern == nil {
						// We are dealing with a leak not marked as expected.
						// Check if it is a flaky leak.
						for flakyLeak := range tcase.flakyLeaks {
							if flakyLeak.MatchString(leak) {
								// The leak is flaky. Carry on to the next line.
								continue LEAKS
							}
						}

						unexpectedLeaks = append(unexpectedLeaks, leak)
					}
				}

				missingLeakStrs := make([]string, 0, len(tcase.expectedLeaks))
				for expectedLeak, found := range tcase.expectedLeaks {
					if !found {
						missingLeakStrs = append(missingLeakStrs, expectedLeak.String())
					}
				}

				var errors []error
				if len(unexpectedLeaks) > 0 {
					errors = append(errors, fmt.Errorf("unexpected goroutine leaks:\n%s\n", strings.Join(unexpectedLeaks, "\n")))
				}
				if len(missingLeakStrs) > 0 {
					errors = append(errors, fmt.Errorf("missing expected leaks:\n%s\n", strings.Join(missingLeakStrs, ", ")))
				}
				if len(errors) > 0 {
					t.Fatalf("Failed with the following errors:\n%s\n\noutput:\n%s", errors, output)
				}
			})
		}
	}

	testApp("testgoroutineleakprofile", testCases)
	testApp("testgoroutineleakprofile/goker", gokerTestCases)
}
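Since buildTestProg injects GOEXPERIMENT=goroutineleakprofile into the child programs itself (see the crash_test.go hunk earlier), running this suite should need no special environment; something like `go test -run TestGoroutineLeakProfile runtime` is expected to work, with `-short` trimming the flaky-test repetitions from 10 down to 2.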
@@ -1267,6 +1267,28 @@ func markBitsForSpan(base uintptr) (mbits markBits) {
 	return mbits
 }

+// isMarkedOrNotInHeap returns true if a pointer is in the heap and marked,
+// or if the pointer is not in the heap. Used by goroutine leak detection
+// to determine if concurrency resources are reachable in memory.
+func isMarkedOrNotInHeap(p unsafe.Pointer) bool {
+	obj, span, objIndex := findObject(uintptr(p), 0, 0)
+	if obj != 0 {
+		mbits := span.markBitsForIndex(objIndex)
+		return mbits.isMarked()
+	}
+
+	// If we fall through to get here, the object is not in the heap.
+	// In this case, it is either a pointer to a stack object or a global resource.
+	// Treat it as reachable in memory by default, to be safe.
+	//
+	// TODO(vsaioc): we could be more precise by checking against the stacks
+	// of runnable goroutines. I don't think this is necessary, based on what we've seen,
+	// but let's keep the option open in case the runtime evolves.
+	// This will (naively) lead to quadratic blow-up for goroutine leak detection,
+	// but if it is only run on demand, maybe the extra cost is not a show-stopper.
+	return true
+}
+
 // advance advances the markBits to the next object in the span.
 func (m *markBits) advance() {
 	if m.mask == 1<<7 {
@@ -376,8 +376,8 @@ type workType struct {
 	// (and thus 8-byte alignment even on 32-bit architectures).
 	bytesMarked uint64

-	markrootNext uint32 // next markroot job
-	markrootJobs uint32 // number of markroot jobs
+	markrootNext atomic.Uint32 // next markroot job
+	markrootJobs atomic.Uint32 // number of markroot jobs

 	nproc  uint32
 	tstart int64
@@ -385,17 +385,44 @@ type workType struct {

 	// Number of roots of various root types. Set by gcPrepareMarkRoots.
 	//
-	// nStackRoots == len(stackRoots), but we have nStackRoots for
-	// consistency.
-	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
+	// During a normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots);
+	// during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots
+	// scheduled for marking.
+	// In both variants, nStackRoots == len(stackRoots).
+	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int
+
+	// The following fields monitor the GC phase of the current cycle during
+	// goroutine leak detection.
+	goroutineLeak struct {
+		// Once set, it indicates that the GC will perform goroutine leak detection during
+		// the next GC cycle; it is set by goroutineLeakGC and unset during gcStart.
+		pending atomic.Bool
+		// Once set, it indicates that the GC has started a goroutine leak detection run;
+		// it is set during gcStart and unset during gcMarkTermination.
+		//
+		// Protected by STW.
+		enabled bool
+		// Once set, it indicates that the GC has performed goroutine leak detection during
+		// the current GC cycle; it is set during gcMarkDone, right after goroutine leak
+		// detection, and unset during gcMarkTermination.
+		//
+		// Protected by STW.
+		done bool
+		// The number of leaked goroutines found during the last leak detection GC cycle.
+		//
+		// Write-protected by STW in findGoroutineLeaks.
+		count int
+	}

 	// Base indexes of each root type. Set by gcPrepareMarkRoots.
 	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32

-	// stackRoots is a snapshot of all of the Gs that existed
-	// before the beginning of concurrent marking. The backing
-	// store of this must not be modified because it might be
-	// shared with allgs.
+	// stackRoots is a snapshot of all of the Gs that existed before the
+	// beginning of concurrent marking. During goroutine leak detection, stackRoots
+	// is partitioned into two sets: to the left of nMaybeRunnableStackRoots are stackRoots
+	// of running / runnable goroutines, and to the right of nMaybeRunnableStackRoots are
+	// stackRoots of unmarked / not-runnable goroutines.
+	// The stackRoots array is re-partitioned after each marking phase iteration.
 	stackRoots []*g

 	// Each type of GC state transition is protected by a lock.
@@ -562,6 +589,55 @@ func GC() {
 	releasem(mp)
 }

+// goroutineLeakGC runs a GC cycle that performs goroutine leak detection.
+//
+//go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC
+func goroutineLeakGC() {
+	// Set the pending flag to true, instructing the next GC cycle to
+	// perform goroutine leak detection.
+	work.goroutineLeak.pending.Store(true)
+
+	// Spin GC cycles until the pending flag is unset.
+	// This ensures that goroutineLeakGC waits for a GC cycle that
+	// actually performs goroutine leak detection.
+	//
+	// This is needed in case multiple concurrent calls to GC
+	// are simultaneously fired by the system, wherein some
+	// of them are dropped.
+	//
+	// In the vast majority of cases, only one loop iteration is needed;
+	// however, multiple concurrent calls to goroutineLeakGC could lead to
+	// the execution of additional GC cycles.
+	//
+	// Examples:
+	//
+	//	pending? |           G1            |          G2
+	//	---------|-------------------------|-----------------------
+	//	    -    | goroutineLeakGC()       | goroutineLeakGC()
+	//	    -    | pending.Store(true)     | .
+	//	    X    | for pending.Load()      | .
+	//	    X    | GC()                    | .
+	//	    X    | > gcStart()             | .
+	//	    X    | pending.Store(false)    | .
+	//	...
+	//	    -    | > gcMarkDone()          | .
+	//	    -    | .                       | pending.Store(true)
+	//	...
+	//	    X    | > gcMarkTermination()   | .
+	//	    X    | ...
+	//	    X    | < GC returns            | .
+	//	    X    | for pending.Load()      | .
+	//	    X    | GC()                    | .
+	//	    X    | .                       | for pending.Load()
+	//	    X    | .                       | GC()
+	//	...
+	//
+	// The first to pick up the pending flag will start a
+	// leak detection cycle.
+	for work.goroutineLeak.pending.Load() {
+		GC()
+	}
+}
+
 // gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
 // already completed this mark phase, it returns immediately.
 func gcWaitOnMark(n uint32) {
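The pending-flag protocol above generalizes to any "request a flagged cycle and wait until one actually runs" situation: the flag may be consumed by somebody else's cycle, and that is fine. A stripped-down, standalone model of the same coordination (this is a sketch for intuition, not runtime code):

package sketch

import "sync/atomic"

var pending atomic.Bool

// runCycle models one GC cycle: it consumes the pending flag at the
// start, exactly as gcStart does above.
func runCycle() {
	if pending.Load() {
		pending.Store(false)
		// ... perform the special (leak-detection) work here ...
	}
	// ... ordinary cycle work ...
}

// requestCycle mirrors goroutineLeakGC: set the flag, then keep starting
// cycles until some cycle has consumed it. Concurrent requesters may
// share a single flagged cycle, or occasionally trigger extra cycles.
func requestCycle() {
	pending.Store(true)
	for pending.Load() {
		runCycle()
	}
}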
@@ -785,6 +861,15 @@ func gcStart(trigger gcTrigger) {
 		schedEnableUser(false)
 	}

+	// If goroutine leak detection is pending, enable it for this GC cycle.
+	if work.goroutineLeak.pending.Load() {
+		work.goroutineLeak.enabled = true
+		work.goroutineLeak.pending.Store(false)
+		// Set all sync objects of blocked goroutines as untraceable
+		// by the GC. Only set as traceable at the end of the GC cycle.
+		setSyncObjectsUntraceable()
+	}
+
 	// Enter concurrent mark phase and enable
 	// write barriers.
 	//
@@ -995,8 +1080,20 @@ top:
 			}
 		}
 	})
-	if restart {
-		gcDebugMarkDone.restartedDueTo27993 = true
+
+	// Check whether we need to resume the marking phase because of issue #27993
+	// or because of goroutine leak detection.
+	if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) {
+		if restart {
+			// Restart because of issue #27993.
+			gcDebugMarkDone.restartedDueTo27993 = true
+		} else {
+			// Marking has reached a fixed point. Attempt to detect goroutine leaks.
+			//
+			// If the returned value is true, then detection already concluded for this cycle.
+			// Otherwise, more runnable goroutines were discovered, requiring additional mark work.
+			work.goroutineLeak.done = findGoroutineLeaks()
+		}

 		getg().m.preemptoff = ""
 		systemstack(func() {
@@ -1047,6 +1144,172 @@ top:
 	gcMarkTermination(stw)
 }

+// isMaybeRunnable checks whether a goroutine may still be semantically runnable.
+// For goroutines which are semantically runnable, this will eventually return true
+// as the GC marking phase progresses. It returns false for leaked goroutines, or for
+// goroutines which are not yet computed as possibly runnable by the GC.
+func (gp *g) isMaybeRunnable() bool {
+	// Check whether the goroutine is actually in a waiting state first.
+	if readgstatus(gp) != _Gwaiting {
+		// If the goroutine is not waiting, then clearly it is maybe runnable.
+		return true
+	}
+
+	switch gp.waitreason {
+	case waitReasonSelectNoCases,
+		waitReasonChanSendNilChan,
+		waitReasonChanReceiveNilChan:
+		// Select with no cases or communicating on nil channels
+		// make goroutines unrunnable by definition.
+		return false
+	case waitReasonChanReceive,
+		waitReasonSelect,
+		waitReasonChanSend:
+		// Cycle through all *sudog to check whether
+		// the goroutine is waiting on a marked channel.
+		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+			if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) {
+				return true
+			}
+		}
+		return false
+	case waitReasonSyncCondWait,
+		waitReasonSyncWaitGroupWait,
+		waitReasonSyncMutexLock,
+		waitReasonSyncRWMutexLock,
+		waitReasonSyncRWMutexRLock:
+		// If waiting on mutexes, wait groups, or condition variables,
+		// check if the synchronization primitive attached to the sudog is marked.
+		if gp.waiting != nil {
+			return isMarkedOrNotInHeap(gp.waiting.elem.get())
+		}
+	}
+	return true
+}
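For intuition, this is the kind of program that logic classifies: once leaky returns, only the blocked goroutine itself references the channel, so marking can never reach it through live memory and the goroutine gets reported as leaked. A hedged illustration — a plain runtime.GC() does not flag anything by itself; the leak-detection cycle is triggered through the profile machinery elsewhere in this CL:

package main

import (
	"runtime"
	"time"
)

func leaky() {
	ch := make(chan int) // after leaky returns, only the blocked goroutine references ch
	go func() {
		<-ch // blocks forever: no sender exists and ch is otherwise unreachable
	}()
}

func main() {
	leaky()
	runtime.GC() // a leak-detection cycle (e.g. via the pprof endpoint) would flag the goroutine
	time.Sleep(100 * time.Millisecond)
}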
+
+// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist.
+// If so, it adds them into the root set and increments work.markrootJobs accordingly.
+// Returns true if we need to run another phase of markroots; returns false otherwise.
+func findMaybeRunnableGoroutines() (moreWork bool) {
+	oldRootJobs := work.markrootJobs.Load()
+
+	// To begin with, we have a set of unchecked stackRoots between
+	// vIndex and ivIndex. During the loop, anything < vIndex should be
+	// valid stackRoots and anything >= ivIndex should be invalid stackRoots.
+	// The loop terminates when the two indices meet.
+	var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots
+	// Reorder the goroutine list.
+	for vIndex < ivIndex {
+		if work.stackRoots[vIndex].isMaybeRunnable() {
+			vIndex = vIndex + 1
+			continue
+		}
+		for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 {
+			if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() {
+				work.stackRoots[ivIndex] = work.stackRoots[vIndex]
+				work.stackRoots[vIndex] = gp
+				vIndex = vIndex + 1
+				break
+			}
+		}
+	}
+
+	newRootJobs := work.baseStacks + uint32(vIndex)
+	if newRootJobs > oldRootJobs {
+		work.nMaybeRunnableStackRoots = vIndex
+		work.markrootJobs.Store(newRootJobs)
+	}
+	return newRootJobs > oldRootJobs
+}
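The loop above is a swap-based, Hoare-style partition specialized to the GC's needs. The same scheme over a plain slice, as a standalone sketch with hypothetical names:

package sketch

// partition moves elements satisfying keep to the front of s and returns
// the boundary index; order within each half is not preserved. This
// mirrors how findMaybeRunnableGoroutines re-partitions stackRoots.
func partition[T any](s []T, keep func(T) bool) int {
	lo, hi := 0, len(s)
	for lo < hi {
		if keep(s[lo]) {
			lo++
			continue
		}
		// Walk hi down until it finds a keepable element to swap in.
		for hi = hi - 1; hi != lo; hi-- {
			if keep(s[hi]) {
				s[lo], s[hi] = s[hi], s[lo]
				lo++
				break
			}
		}
	}
	return lo
}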
+
+// setSyncObjectsUntraceable scans allgs and sets the elem and c fields of all sudogs to
+// an untraceable pointer. This prevents the GC from marking these objects as live in memory
+// by following these pointers when running deadlock detection.
+func setSyncObjectsUntraceable() {
+	assertWorldStopped()
+
+	forEachGRace(func(gp *g) {
+		// Set as untraceable all synchronization objects of goroutines
+		// blocked at concurrency operations that could leak.
+		switch {
+		case gp.waitreason.isSyncWait():
+			// Synchronization primitives are reachable from the *sudog
+			// via the elem field.
+			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+				sg.elem.setUntraceable()
+			}
+		case gp.waitreason.isChanWait():
+			// Channels and select statements are reachable from the *sudog via the c field.
+			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+				sg.c.setUntraceable()
+			}
+		}
+	})
+}
+
+// gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values.
+// Should be invoked after the goroutine leak detection phase.
+func gcRestoreSyncObjects() {
+	assertWorldStopped()
+
+	forEachGRace(func(gp *g) {
+		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+			sg.elem.setTraceable()
+			sg.c.setTraceable()
+		}
+	})
+}
+
+// findGoroutineLeaks scans the remaining stackRoots and marks any which are
+// blocked over exclusively unreachable concurrency primitives as leaked (deadlocked).
+// Returns true if the goroutine leak check was performed (or was unnecessary).
+// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines.
+func findGoroutineLeaks() bool {
+	assertWorldStopped()
+
+	// Report goroutine leaks, mark them unreachable, and resume marking.
+	// We still need to mark these unreachable *g structs, as they
+	// get reused, but their stacks won't get scanned.
+	if work.nMaybeRunnableStackRoots == work.nStackRoots {
+		// nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked.
+		return true
+	}
+
+	// Check whether any more maybe-runnable goroutines can be found by the GC.
+	if findMaybeRunnableGoroutines() {
+		// We found more work, so we need to resume the marking phase.
+		return false
+	}
+
+	// For the remaining goroutines, mark them as unreachable and leaked.
+	work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots
+
+	for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ {
+		gp := work.stackRoots[i]
+		casgstatus(gp, _Gwaiting, _Gleaked)
+
+		// Add the primitives causing the goroutine leaks
+		// to the GC work queue, to ensure they are marked.
+		//
+		// NOTE(vsaioc): these primitives should also be reachable
+		// from the goroutine's stack, but let's play it safe.
+		switch {
+		case gp.waitreason.isChanWait():
+			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+				shade(sg.c.uintptr())
+			}
+		case gp.waitreason.isSyncWait():
+			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+				shade(sg.elem.uintptr())
+			}
+		}
+	}
+	// Mark the remaining roots as ready for marking and drain them.
+	work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots))
+	work.nMaybeRunnableStackRoots = work.nStackRoots
+	return true
+}
+
 // World must be stopped and mark assists and background workers must be
 // disabled.
 func gcMarkTermination(stw worldStop) {
@@ -1199,7 +1462,18 @@ func gcMarkTermination(stw worldStop) {
 		throw("non-concurrent sweep failed to drain all sweep queues")
 	}

+	if work.goroutineLeak.enabled {
+		// Restore the elem and c fields of all sudogs to their original values.
+		gcRestoreSyncObjects()
+	}
+
+	var goroutineLeakDone bool
 	systemstack(func() {
+		// Pull the GC out of goroutine leak detection mode.
+		work.goroutineLeak.enabled = false
+		goroutineLeakDone = work.goroutineLeak.done
+		work.goroutineLeak.done = false
+
 		// The memstats updated above must be updated with the world
 		// stopped to ensure consistency of some values, such as
 		// sched.idleTime and sched.totaltime. memstats also include
@@ -1273,7 +1547,11 @@ func gcMarkTermination(stw worldStop) {
 	printlock()
 	print("gc ", memstats.numgc,
 		" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
-		util, "%: ")
+		util, "%")
+	if goroutineLeakDone {
+		print(" (checking for goroutine leaks)")
+	}
+	print(": ")
 	prev := work.tSweepTerm
 	for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
 		if i != 0 {
@@ -1647,8 +1925,8 @@ func gcMark(startTime int64) {
 	work.tstart = startTime

 	// Check that there's no marking work remaining.
-	if work.full != 0 || work.markrootNext < work.markrootJobs {
-		print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
+	if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs {
+		print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
 		panic("non-empty mark queue after concurrent mark")
 	}
@@ -53,6 +53,55 @@ const (
 	pagesPerSpanRoot = min(512, pagesPerArena)
 )

+// internalBlocked returns true if the goroutine is blocked due to an
+// internal (non-leaking) waitReason, e.g. waiting for the netpoller or garbage collector.
+// Such goroutines are never leak detection candidates according to the GC.
+//
+//go:nosplit
+func (gp *g) internalBlocked() bool {
+	reason := gp.waitreason
+	return reason < waitReasonChanReceiveNilChan || waitReasonSyncWaitGroupWait < reason
+}
+
+// allGsSnapshotSortedForGC takes a snapshot of allgs and returns a sorted
+// array of Gs. The array is sorted by the G's status, with running Gs
+// first, followed by blocked Gs. The returned index indicates the cutoff
+// between runnable and blocked Gs.
+//
+// The world must be stopped or allglock must be held.
+func allGsSnapshotSortedForGC() ([]*g, int) {
+	assertWorldStoppedOrLockHeld(&allglock)
+
+	// Reset the status of leaked goroutines in order to improve
+	// the precision of goroutine leak detection.
+	for _, gp := range allgs {
+		gp.atomicstatus.CompareAndSwap(_Gleaked, _Gwaiting)
+	}
+
+	allgsSorted := make([]*g, len(allgs))
+
+	// Indices cutting off runnable and blocked Gs.
+	var currIndex, blockedIndex = 0, len(allgsSorted) - 1
+	for _, gp := range allgs {
+		// Not sure if we need an atomic load because we are stopping the world,
+		// but do it just to be safe for now.
+		if status := readgstatus(gp); status != _Gwaiting || gp.internalBlocked() {
+			allgsSorted[currIndex] = gp
+			currIndex++
+		} else {
+			allgsSorted[blockedIndex] = gp
+			blockedIndex--
+		}
+	}
+
+	// Because the world is stopped or allglock is held, allgadd
+	// cannot happen concurrently with this. allgs grows
+	// monotonically and existing entries never change, so we can
+	// simply return a copy of the slice header. For added safety,
+	// we trim everything past len because that can still change.
+	return allgsSorted, blockedIndex + 1
+}
+
 // gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and
 // some miscellany) and initializes scanning-related state.
 //
@@ -102,11 +151,20 @@ func gcPrepareMarkRoots() {
 	// ignore them because they begin life without any roots, so
 	// there's nothing to scan, and any roots they create during
 	// the concurrent phase will be caught by the write barrier.
-	work.stackRoots = allGsSnapshot()
+	if work.goroutineLeak.enabled {
+		// Goroutine leak detection GC: only prepare maybe-runnable
+		// goroutines for marking.
+		work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC()
+	} else {
+		// Regular GC: scan every goroutine.
+		work.stackRoots = allGsSnapshot()
+		work.nMaybeRunnableStackRoots = len(work.stackRoots)
+	}

 	work.nStackRoots = len(work.stackRoots)

-	work.markrootNext = 0
-	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
+	work.markrootNext.Store(0)
+	work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots))

 	// Calculate base indexes of each root type
 	work.baseData = uint32(fixedRootCount)
@@ -119,8 +177,8 @@
 // gcMarkRootCheck checks that all roots have been scanned. It is
 // purely for debugging.
 func gcMarkRootCheck() {
-	if work.markrootNext < work.markrootJobs {
-		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
+	if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs {
+		print(next, " of ", jobs, " markroot jobs done\n")
 		throw("left over markroot jobs")
 	}
@@ -858,7 +916,7 @@ func scanstack(gp *g, gcw *gcWork) int64 {
 	case _Grunning:
 		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
 		throw("scanstack: goroutine not stopped")
-	case _Grunnable, _Gsyscall, _Gwaiting:
+	case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
 		// ok
 	}
@@ -1126,6 +1184,28 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) {
 	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
 }

+// gcNextMarkRoot safely increments work.markrootNext and returns the
+// index of the next root job. The returned boolean is true if the root job
+// is valid, and false if there are no more root jobs to be claimed,
+// i.e. work.markrootNext >= work.markrootJobs.
+func gcNextMarkRoot() (uint32, bool) {
+	if !work.goroutineLeak.enabled {
+		// If not running goroutine leak detection, assume regular GC behavior.
+		job := work.markrootNext.Add(1) - 1
+		return job, job < work.markrootJobs.Load()
+	}
+
+	// Otherwise, use a CAS loop to increment markrootNext.
+	for next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs; next = work.markrootNext.Load() {
+		// There is still work available at the moment.
+		if work.markrootNext.CompareAndSwap(next, next+1) {
+			// We managed to snatch a root job. Return the root index.
+			return next, true
+		}
+	}
+	return 0, false
+}
+
 // gcDrain scans roots and objects in work buffers, blackening grey
 // objects until it is unable to get more work. It may return before
 // GC is done; it's the caller's responsibility to balance work from
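The CAS loop in gcNextMarkRoot is the standard lock-free pattern for claiming work items up to a moving bound; it matters here because markrootJobs can now grow mid-cycle, so the blind fetch-and-add used by the regular GC path could overshoot the limit. A generic version of the same claim loop, as a standalone sketch:

package sketch

import "sync/atomic"

// claim hands out indices in [0, limit) to concurrent callers without
// ever overshooting the limit, even if limit grows between calls.
func claim(next, limit *atomic.Uint32) (uint32, bool) {
	for {
		n, l := next.Load(), limit.Load()
		if n >= l {
			return 0, false // no work available right now
		}
		if next.CompareAndSwap(n, n+1) {
			return n, true // claimed index n
		}
		// Lost the race; reload and retry.
	}
}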
@@ -1184,13 +1264,12 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 		}
 	}

 	// Drain root marking jobs.
-	if work.markrootNext < work.markrootJobs {
+	if work.markrootNext.Load() < work.markrootJobs.Load() {
 		// Stop if we're preemptible, if someone wants to STW, or if
 		// someone is calling forEachP.
 		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
-			job := atomic.Xadd(&work.markrootNext, +1) - 1
-			if job >= work.markrootJobs {
+			job, ok := gcNextMarkRoot()
+			if !ok {
 				break
 			}
 			markroot(gcw, job, flushBgCredit)
@@ -1342,9 +1421,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
 		if b = gcw.tryGetObj(); b == 0 {
 			if s = gcw.tryGetSpan(); s == 0 {
 				// Try to do a root job.
-				if work.markrootNext < work.markrootJobs {
-					job := atomic.Xadd(&work.markrootNext, +1) - 1
-					if job < work.markrootJobs {
+				if work.markrootNext.Load() < work.markrootJobs.Load() {
+					job, ok := gcNextMarkRoot()
+					if ok {
 						workFlushed += markroot(gcw, job, false)
 						continue
 					}
@@ -1143,7 +1143,7 @@ func gcMarkWorkAvailable() bool {
 	if !work.full.empty() {
 		return true // global work available
 	}
-	if work.markrootNext < work.markrootJobs {
+	if work.markrootNext.Load() < work.markrootJobs.Load() {
 		return true // root scan work available
 	}
 	if work.spanqMask.any() {
@@ -124,7 +124,7 @@ func gcMarkWorkAvailable() bool {
 	if !work.full.empty() {
 		return true // global work available
 	}
-	if work.markrootNext < work.markrootJobs {
+	if work.markrootNext.Load() < work.markrootJobs.Load() {
 		return true // root scan work available
 	}
 	return false
@@ -1259,6 +1259,20 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
 	return goroutineProfileWithLabelsConcurrent(p, labels)
 }

+//go:linkname pprof_goroutineLeakProfileWithLabels
+func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+	return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+}
+
+// labels may be nil. If labels is non-nil, it must have the same length as p.
+func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+	if labels != nil && len(labels) != len(p) {
+		labels = nil
+	}
+
+	return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+}
+
 var goroutineProfile = struct {
 	sema   uint32
 	active bool
@ -1302,6 +1316,48 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt
|
|||
return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
|
||||
}
|
||||
|
||||
func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
||||
if len(p) == 0 {
|
||||
// An empty slice is obviously too small. Return a rough
|
||||
// allocation estimate.
|
||||
return work.goroutineLeak.count, false
|
||||
}
|
||||
|
||||
// Use the same semaphore as goroutineProfileWithLabelsConcurrent,
|
||||
// because ultimately we still use goroutine profiles.
|
||||
semacquire(&goroutineProfile.sema)
|
||||
|
||||
// Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
|
||||
// save the current goroutine stack, because it is obviously not leaked.
|
||||
|
||||
pcbuf := makeProfStack() // see saveg() for explanation
|
||||
|
||||
// Prepare a profile large enough to store all leaked goroutines.
|
||||
n = work.goroutineLeak.count
|
||||
|
||||
if n > len(p) {
|
||||
// There's not enough space in p to store the whole profile, so (per the
|
||||
// contract of runtime.GoroutineProfile) we're not allowed to write to p
|
||||
// at all and must return n, false.
|
||||
semrelease(&goroutineProfile.sema)
|
||||
return n, false
|
||||
}
|
||||
|
||||
// Visit each leaked goroutine and try to record its stack.
|
||||
forEachGRace(func(gp1 *g) {
|
||||
if readgstatus(gp1) == _Gleaked {
|
||||
doRecordGoroutineProfile(gp1, pcbuf)
|
||||
}
|
||||
})
|
||||
|
||||
if raceenabled {
|
||||
raceacquire(unsafe.Pointer(&labelSync))
|
||||
}
|
||||
|
||||
semrelease(&goroutineProfile.sema)
|
||||
return n, true
|
||||
}
|
||||
|
||||
func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
||||
if len(p) == 0 {
|
||||
// An empty slice is obviously too small. Return a rough
|
||||
|
|
|
@ -80,6 +80,7 @@ import (
|
|||
"cmp"
|
||||
"fmt"
|
||||
"internal/abi"
|
||||
"internal/goexperiment"
|
||||
"internal/profilerecord"
|
||||
"io"
|
||||
"runtime"
|
||||
|
@ -105,12 +106,13 @@ import (
|
|||
//
|
||||
// Each Profile has a unique name. A few profiles are predefined:
|
||||
//
|
||||
// goroutine - stack traces of all current goroutines
|
||||
// heap - a sampling of memory allocations of live objects
|
||||
// allocs - a sampling of all past memory allocations
|
||||
// threadcreate - stack traces that led to the creation of new OS threads
|
||||
// block - stack traces that led to blocking on synchronization primitives
|
||||
// mutex - stack traces of holders of contended mutexes
|
||||
// goroutine - stack traces of all current goroutines
|
||||
// goroutineleak - stack traces of all leaked goroutines
|
||||
// allocs - a sampling of all past memory allocations
|
||||
// heap - a sampling of memory allocations of live objects
|
||||
// threadcreate - stack traces that led to the creation of new OS threads
|
||||
// block - stack traces that led to blocking on synchronization primitives
|
||||
// mutex - stack traces of holders of contended mutexes
|
||||
//
|
||||
// These predefined profiles maintain themselves and panic on an explicit
|
||||
// [Profile.Add] or [Profile.Remove] method call.
|
||||
|
@ -169,6 +171,7 @@ import (
|
|||
// holds a lock for 1s while 5 other goroutines are waiting for the entire
|
||||
// second to acquire the lock, its unlock call stack will report 5s of
|
||||
// contention.
|
||||
|
||||
type Profile struct {
|
||||
name string
|
||||
mu sync.Mutex
|
||||
|
@ -189,6 +192,12 @@ var goroutineProfile = &Profile{
|
|||
write: writeGoroutine,
|
||||
}
|
||||
|
||||
var goroutineLeakProfile = &Profile{
|
||||
name: "goroutineleak",
|
||||
count: runtime_goroutineleakcount,
|
||||
write: writeGoroutineLeak,
|
||||
}
|
||||
|
||||
var threadcreateProfile = &Profile{
|
||||
name: "threadcreate",
|
||||
count: countThreadCreate,
|
||||
|
@ -231,6 +240,9 @@ func lockProfiles() {
|
|||
"block": blockProfile,
|
||||
"mutex": mutexProfile,
|
||||
}
|
||||
if goexperiment.GoroutineLeakProfile {
|
||||
profiles.m["goroutineleak"] = goroutineLeakProfile
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -275,6 +287,7 @@ func Profiles() []*Profile {
|
|||
|
||||
all := make([]*Profile, 0, len(profiles.m))
|
||||
for _, p := range profiles.m {
|
||||
|
||||
all = append(all, p)
|
||||
}
|
||||
|
||||
|
@ -747,6 +760,23 @@ func writeGoroutine(w io.Writer, debug int) error {
|
|||
return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
|
||||
}
|
||||
|
||||
// writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection.
|
||||
// It then writes the goroutine profile, filtering for leaked goroutines.
|
||||
func writeGoroutineLeak(w io.Writer, debug int) error {
|
||||
// Run the GC with leak detection first so that leaked goroutines
|
||||
// may transition to the leaked state.
|
||||
runtime_goroutineLeakGC()
|
||||
|
||||
// If the debug flag is set sufficiently high, just defer to writing goroutine stacks
|
||||
// like in a regular goroutine profile. Include non-leaked goroutines, too.
|
||||
if debug >= 2 {
|
||||
return writeGoroutineStacks(w)
|
||||
}
|
||||
|
||||
// Otherwise, write the goroutine leak profile.
|
||||
return writeRuntimeProfile(w, debug, "goroutineleak", pprof_goroutineLeakProfileWithLabels)
|
||||
}
|
||||
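For reference, this is how the test programs later in this change consume the new profile; a minimal usage sketch (the profile is only registered when the goroutineleakprofile experiment is enabled, so the nil check is deliberate):

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// Lookup returns nil when no profile with this name is registered,
	// e.g. when the goexperiment is off.
	if prof := pprof.Lookup("goroutineleak"); prof != nil {
		// debug=2 prints leaked goroutines in the same format as an
		// unrecovered panic; writing the profile first runs a
		// leak-detection GC cycle.
		prof.WriteTo(os.Stderr, 2)
	}
}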
|
||||
func writeGoroutineStacks(w io.Writer) error {
|
||||
// We don't know how big the buffer needs to be to collect
|
||||
// all the goroutines. Start with 1 MB and try a few times, doubling each time.
|
||||
|
@ -969,6 +999,9 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
|
|||
//go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels
|
||||
func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
|
||||
|
||||
//go:linkname pprof_goroutineLeakProfileWithLabels runtime.pprof_goroutineLeakProfileWithLabels
|
||||
func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
|
||||
|
||||
//go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
|
||||
func pprof_cyclesPerSecond() int64
|
||||
|
||||
|
|
|
@ -29,6 +29,12 @@ func runtime_setProfLabel(labels unsafe.Pointer)
|
|||
// runtime_getProfLabel is defined in runtime/proflabel.go.
|
||||
func runtime_getProfLabel() unsafe.Pointer
|
||||
|
||||
// runtime_goroutineleakcount is defined in runtime/proc.go.
|
||||
func runtime_goroutineleakcount() int
|
||||
|
||||
// runtime_goroutineLeakGC is defined in runtime/mgc.go.
|
||||
func runtime_goroutineLeakGC()
|
||||
|
||||
// SetGoroutineLabels sets the current goroutine's labels to match ctx.
|
||||
// A new goroutine inherits the labels of the goroutine that created it.
|
||||
// This is a lower-level API than [Do], which should be used instead when possible.
|
||||
|
|
|
@ -160,7 +160,7 @@ func suspendG(gp *g) suspendGState {
|
|||
s = _Gwaiting
|
||||
fallthrough
|
||||
|
||||
case _Grunnable, _Gsyscall, _Gwaiting:
|
||||
case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
|
||||
// Claim goroutine by setting scan bit.
|
||||
// This may race with execution or readying of gp.
|
||||
// The scan bit keeps it from transition state.
|
||||
|
@ -269,6 +269,7 @@ func resumeG(state suspendGState) {
|
|||
|
||||
case _Grunnable | _Gscan,
|
||||
_Gwaiting | _Gscan,
|
||||
_Gleaked | _Gscan,
|
||||
_Gsyscall | _Gscan:
|
||||
casfrom_Gscanstatus(gp, s, s&^_Gscan)
|
||||
}
|
||||
|
|
|
@ -517,7 +517,7 @@ func acquireSudog() *sudog {
|
|||
s := pp.sudogcache[n-1]
|
||||
pp.sudogcache[n-1] = nil
|
||||
pp.sudogcache = pp.sudogcache[:n-1]
|
||||
if s.elem != nil {
|
||||
if s.elem.get() != nil {
|
||||
throw("acquireSudog: found s.elem != nil in cache")
|
||||
}
|
||||
releasem(mp)
|
||||
|
@ -526,7 +526,7 @@ func acquireSudog() *sudog {
|
|||
|
||||
//go:nosplit
|
||||
func releaseSudog(s *sudog) {
|
||||
if s.elem != nil {
|
||||
if s.elem.get() != nil {
|
||||
throw("runtime: sudog with non-nil elem")
|
||||
}
|
||||
if s.isSelect {
|
||||
|
@ -541,7 +541,7 @@ func releaseSudog(s *sudog) {
|
|||
if s.waitlink != nil {
|
||||
throw("runtime: sudog with non-nil waitlink")
|
||||
}
|
||||
if s.c != nil {
|
||||
if s.c.get() != nil {
|
||||
throw("runtime: sudog with non-nil c")
|
||||
}
|
||||
gp := getg()
|
||||
|
@ -1222,6 +1222,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
|
|||
_Gscanwaiting,
|
||||
_Gscanrunning,
|
||||
_Gscansyscall,
|
||||
_Gscanleaked,
|
||||
_Gscanpreempted:
|
||||
if newval == oldval&^_Gscan {
|
||||
success = gp.atomicstatus.CompareAndSwap(oldval, newval)
|
||||
|
@ -1242,6 +1243,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
|
|||
case _Grunnable,
|
||||
_Grunning,
|
||||
_Gwaiting,
|
||||
_Gleaked,
|
||||
_Gsyscall:
|
||||
if newval == oldval|_Gscan {
|
||||
r := gp.atomicstatus.CompareAndSwap(oldval, newval)
|
||||
|
@ -5565,6 +5567,14 @@ func gcount(includeSys bool) int32 {
|
|||
return n
|
||||
}
|
||||
|
||||
// goroutineleakcount returns the number of leaked goroutines last reported by
|
||||
// the runtime.
|
||||
//
|
||||
//go:linkname goroutineleakcount runtime/pprof.runtime_goroutineleakcount
|
||||
func goroutineleakcount() int {
|
||||
return work.goroutineLeak.count
|
||||
}
|
||||
|
||||
func mcount() int32 {
|
||||
return int32(sched.mnext - sched.nmfreed)
|
||||
}
|
||||
|
|
|
@ -87,6 +87,9 @@ const (
|
|||
// ready()ing this G.
|
||||
_Gpreempted // 9
|
||||
|
||||
// _Gleaked represents a leaked goroutine caught by the GC.
|
||||
_Gleaked // 10
|
||||
|
||||
// _Gscan combined with one of the above states other than
|
||||
// _Grunning indicates that GC is scanning the stack. The
|
||||
// goroutine is not executing user code and the stack is owned
|
||||
|
@ -104,6 +107,7 @@ const (
|
|||
_Gscansyscall = _Gscan + _Gsyscall // 0x1003
|
||||
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004
|
||||
_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
|
||||
_Gscanleaked = _Gscan + _Gleaked // 0x100a
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -315,6 +319,78 @@ type gobuf struct {
|
|||
bp uintptr // for framepointer-enabled architectures
|
||||
}
|
||||
|
||||
// maybeTraceablePtr is a special pointer that is conditionally traceable
|
||||
// by the GC. It consists of an address as a uintptr (vu) and a pointer
|
||||
// to a data element (vp).
|
||||
//
|
||||
// maybeTraceablePtr values can be in one of three states:
|
||||
// 1. Unset: vu == 0 && vp == nil
|
||||
// 2. Untracked: vu != 0 && vp == nil
|
||||
// 3. Tracked: vu != 0 && vp != nil
|
||||
//
|
||||
// Do not set fields manually. Use methods instead.
|
||||
// Extend this type with additional methods if needed.
|
||||
type maybeTraceablePtr struct {
|
||||
vp unsafe.Pointer // For liveness only.
|
||||
vu uintptr // Source of truth.
|
||||
}
|
||||
|
||||
// setUntraceable unsets the pointer but preserves the address.
|
||||
// This is used to hide the pointer from the GC.
|
||||
//
|
||||
//go:nosplit
|
||||
func (p *maybeTraceablePtr) setUntraceable() {
|
||||
p.vp = nil
|
||||
}
|
||||
|
||||
// setTraceable restores the pointer from the stored address.
|
||||
// This is used to make the pointer visible to the GC.
|
||||
//
|
||||
//go:nosplit
|
||||
func (p *maybeTraceablePtr) setTraceable() {
|
||||
p.vp = unsafe.Pointer(p.vu)
|
||||
}
|
||||
|
||||
// set sets the pointer to the data element and updates the address.
|
||||
//
|
||||
//go:nosplit
|
||||
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
|
||||
p.vp = v
|
||||
p.vu = uintptr(v)
|
||||
}
|
||||
|
||||
// get retrieves the pointer to the data element.
|
||||
//
|
||||
//go:nosplit
|
||||
func (p *maybeTraceablePtr) get() unsafe.Pointer {
|
||||
return unsafe.Pointer(p.vu)
|
||||
}
|
||||
|
||||
// uintptr returns the uintptr address of the pointer.
|
||||
//
|
||||
//go:nosplit
|
||||
func (p *maybeTraceablePtr) uintptr() uintptr {
|
||||
return p.vu
|
||||
}
|
||||
|
||||
// maybeTraceableChan extends conditionally traceable pointers (maybeTraceablePtr)
|
||||
// to track hchan pointers.
|
||||
//
|
||||
// Do not set fields manually. Use methods instead.
|
||||
type maybeTraceableChan struct {
|
||||
maybeTraceablePtr
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (p *maybeTraceableChan) set(c *hchan) {
|
||||
p.maybeTraceablePtr.set(unsafe.Pointer(c))
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func (p *maybeTraceableChan) get() *hchan {
|
||||
return (*hchan)(p.maybeTraceablePtr.get())
|
||||
}
|
||||
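To make the three states concrete, here is a self-contained sketch that mirrors the type outside the runtime (illustrative only; the uintptr round-trip is exactly what hides the pointee from the GC):

package main

import (
	"fmt"
	"unsafe"
)

// maybePtr mirrors maybeTraceablePtr for illustration.
type maybePtr struct {
	vp unsafe.Pointer // for liveness only
	vu uintptr        // source of truth
}

func (p *maybePtr) set(v unsafe.Pointer) { p.vp, p.vu = v, uintptr(v) }
func (p *maybePtr) setUntraceable()      { p.vp = nil }                  // hide from GC
func (p *maybePtr) setTraceable()        { p.vp = unsafe.Pointer(p.vu) } // re-expose to GC
func (p *maybePtr) get() unsafe.Pointer  { return unsafe.Pointer(p.vu) }

func main() {
	x := 42
	var p maybePtr
	p.set(unsafe.Pointer(&x))   // state 3: tracked (vu != 0, vp != nil)
	p.setUntraceable()          // state 2: untracked (vu != 0, vp == nil)
	fmt.Println(p.get() != nil) // true: the address survives untracking
	p.setTraceable()            // tracked again
}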
|
||||
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
|
||||
// on a channel.
|
||||
//
|
||||
|
@ -334,7 +410,8 @@ type sudog struct {
|
|||
|
||||
next *sudog
|
||||
prev *sudog
|
||||
elem unsafe.Pointer // data element (may point to stack)
|
||||
|
||||
elem maybeTraceablePtr // data element (may point to stack)
|
||||
|
||||
// The following fields are never accessed concurrently.
|
||||
// For channels, waitlink is only accessed by g.
|
||||
|
@ -362,10 +439,10 @@ type sudog struct {
|
|||
// in the second entry in the list.)
|
||||
waiters uint16
|
||||
|
||||
parent *sudog // semaRoot binary tree
|
||||
waitlink *sudog // g.waiting list or semaRoot
|
||||
waittail *sudog // semaRoot
|
||||
c *hchan // channel
|
||||
parent *sudog // semaRoot binary tree
|
||||
waitlink *sudog // g.waiting list or semaRoot
|
||||
waittail *sudog // semaRoot
|
||||
c maybeTraceableChan // channel
|
||||
}
|
||||
|
||||
type libcall struct {
|
||||
|
@ -1072,24 +1149,24 @@ const (
|
|||
waitReasonZero waitReason = iota // ""
|
||||
waitReasonGCAssistMarking // "GC assist marking"
|
||||
waitReasonIOWait // "IO wait"
|
||||
waitReasonChanReceiveNilChan // "chan receive (nil chan)"
|
||||
waitReasonChanSendNilChan // "chan send (nil chan)"
|
||||
waitReasonDumpingHeap // "dumping heap"
|
||||
waitReasonGarbageCollection // "garbage collection"
|
||||
waitReasonGarbageCollectionScan // "garbage collection scan"
|
||||
waitReasonPanicWait // "panicwait"
|
||||
waitReasonSelect // "select"
|
||||
waitReasonSelectNoCases // "select (no cases)"
|
||||
waitReasonGCAssistWait // "GC assist wait"
|
||||
waitReasonGCSweepWait // "GC sweep wait"
|
||||
waitReasonGCScavengeWait // "GC scavenge wait"
|
||||
waitReasonChanReceive // "chan receive"
|
||||
waitReasonChanSend // "chan send"
|
||||
waitReasonFinalizerWait // "finalizer wait"
|
||||
waitReasonForceGCIdle // "force gc (idle)"
|
||||
waitReasonUpdateGOMAXPROCSIdle // "GOMAXPROCS updater (idle)"
|
||||
waitReasonSemacquire // "semacquire"
|
||||
waitReasonSleep // "sleep"
|
||||
waitReasonChanReceiveNilChan // "chan receive (nil chan)"
|
||||
waitReasonChanSendNilChan // "chan send (nil chan)"
|
||||
waitReasonSelectNoCases // "select (no cases)"
|
||||
waitReasonSelect // "select"
|
||||
waitReasonChanReceive // "chan receive"
|
||||
waitReasonChanSend // "chan send"
|
||||
waitReasonSyncCondWait // "sync.Cond.Wait"
|
||||
waitReasonSyncMutexLock // "sync.Mutex.Lock"
|
||||
waitReasonSyncRWMutexRLock // "sync.RWMutex.RLock"
|
||||
|
@ -1175,12 +1252,34 @@ func (w waitReason) String() string {
|
|||
return waitReasonStrings[w]
|
||||
}
|
||||
|
||||
// isMutexWait returns true if the goroutine is blocked because of
|
||||
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
|
||||
//
|
||||
//go:nosplit
|
||||
func (w waitReason) isMutexWait() bool {
|
||||
return w == waitReasonSyncMutexLock ||
|
||||
w == waitReasonSyncRWMutexRLock ||
|
||||
w == waitReasonSyncRWMutexLock
|
||||
}
|
||||
|
||||
// isSyncWait returns true if the goroutine is blocked because of
|
||||
// sync library primitive operations.
|
||||
//
|
||||
//go:nosplit
|
||||
func (w waitReason) isSyncWait() bool {
|
||||
return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
|
||||
}
|
||||
|
||||
// isChanWait returns true if the goroutine is blocked because of non-nil
|
||||
// channel operations or a select statement with at least one case.
|
||||
//
|
||||
//go:nosplit
|
||||
func (w waitReason) isChanWait() bool {
|
||||
return w == waitReasonSelect ||
|
||||
w == waitReasonChanReceive ||
|
||||
w == waitReasonChanSend
|
||||
}
|
||||
|
||||
func (w waitReason) isWaitingForSuspendG() bool {
|
||||
return isWaitingForSuspendG[w]
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool {
|
|||
// channels in lock order.
|
||||
var lastc *hchan
|
||||
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
|
||||
if sg.c != lastc && lastc != nil {
|
||||
if sg.c.get() != lastc && lastc != nil {
|
||||
// As soon as we unlock the channel, fields in
|
||||
// any sudog with that channel may change,
|
||||
// including c and waitlink. Since multiple
|
||||
|
@ -92,7 +92,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool {
|
|||
// of a channel.
|
||||
unlock(&lastc.lock)
|
||||
}
|
||||
lastc = sg.c
|
||||
lastc = sg.c.get()
|
||||
}
|
||||
if lastc != nil {
|
||||
unlock(&lastc.lock)
|
||||
|
@ -320,12 +320,12 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
|
|||
sg.isSelect = true
|
||||
// No stack splits between assigning elem and enqueuing
|
||||
// sg on gp.waiting where copystack can find it.
|
||||
sg.elem = cas.elem
|
||||
sg.elem.set(cas.elem)
|
||||
sg.releasetime = 0
|
||||
if t0 != 0 {
|
||||
sg.releasetime = -1
|
||||
}
|
||||
sg.c = c
|
||||
sg.c.set(c)
|
||||
// Construct waiting list in lock order.
|
||||
*nextp = sg
|
||||
nextp = &sg.waitlink
|
||||
|
@ -368,8 +368,8 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
|
|||
// Clear all elem before unlinking from gp.waiting.
|
||||
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
|
||||
sg1.isSelect = false
|
||||
sg1.elem = nil
|
||||
sg1.c = nil
|
||||
sg1.elem.set(nil)
|
||||
sg1.c.set(nil)
|
||||
}
|
||||
gp.waiting = nil
|
||||
|
||||
|
|
|
@ -303,7 +303,10 @@ func cansemacquire(addr *uint32) bool {
|
|||
// queue adds s to the blocked goroutines in semaRoot.
|
||||
func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
|
||||
s.g = getg()
|
||||
s.elem = unsafe.Pointer(addr)
|
||||
s.elem.set(unsafe.Pointer(addr))
|
||||
// Store this pointer so that we can trace the semaphore address
|
||||
// from the blocked goroutine when checking for goroutine leaks.
|
||||
s.g.waiting = s
|
||||
s.next = nil
|
||||
s.prev = nil
|
||||
s.waiters = 0
|
||||
|
@ -311,7 +314,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
|
|||
var last *sudog
|
||||
pt := &root.treap
|
||||
for t := *pt; t != nil; t = *pt {
|
||||
if t.elem == unsafe.Pointer(addr) {
|
||||
if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() {
|
||||
// Already have addr in list.
|
||||
if lifo {
|
||||
// Substitute s in t's place in treap.
|
||||
|
@ -357,7 +360,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
|
|||
return
|
||||
}
|
||||
last = t
|
||||
if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) {
|
||||
if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() {
|
||||
pt = &t.prev
|
||||
} else {
|
||||
pt = &t.next
|
||||
|
@ -402,11 +405,13 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
|
|||
func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) {
|
||||
ps := &root.treap
|
||||
s := *ps
|
||||
|
||||
for ; s != nil; s = *ps {
|
||||
if s.elem == unsafe.Pointer(addr) {
|
||||
if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() {
|
||||
goto Found
|
||||
}
|
||||
if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) {
|
||||
|
||||
if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() {
|
||||
ps = &s.prev
|
||||
} else {
|
||||
ps = &s.next
|
||||
|
@ -470,8 +475,10 @@ Found:
|
|||
}
|
||||
tailtime = s.acquiretime
|
||||
}
|
||||
// Goroutine is no longer blocked. Clear the waiting pointer.
|
||||
s.g.waiting = nil
|
||||
s.parent = nil
|
||||
s.elem = nil
|
||||
s.elem.set(nil)
|
||||
s.next = nil
|
||||
s.prev = nil
|
||||
s.ticket = 0
|
||||
|
@ -590,6 +597,10 @@ func notifyListWait(l *notifyList, t uint32) {
|
|||
// Enqueue itself.
|
||||
s := acquireSudog()
|
||||
s.g = getg()
|
||||
// Store this pointer so that we can trace the condvar address
|
||||
// from the blocked goroutine when checking for goroutine leaks.
|
||||
s.elem.set(unsafe.Pointer(l))
|
||||
s.g.waiting = s
|
||||
s.ticket = t
|
||||
s.releasetime = 0
|
||||
t0 := int64(0)
|
||||
|
@ -607,6 +618,10 @@ func notifyListWait(l *notifyList, t uint32) {
|
|||
if t0 != 0 {
|
||||
blockevent(s.releasetime-t0, 2)
|
||||
}
|
||||
// Goroutine is no longer blocked. Clear its waiting pointer,
|
||||
// and clean up the sudog before releasing it.
|
||||
s.g.waiting = nil
|
||||
s.elem.set(nil)
|
||||
releaseSudog(s)
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
|
|||
_64bit uintptr // size on 64bit platforms
|
||||
}{
|
||||
{runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
|
||||
{runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
|
||||
{runtime.Sudog{}, 64, 104}, // sudog, but exported for testing
|
||||
}
|
||||
|
||||
if xreg > runtime.PtrSize {
|
||||
|
|
|
@ -821,7 +821,8 @@ func adjustsudogs(gp *g, adjinfo *adjustinfo) {
|
|||
// the data elements pointed to by a SudoG structure
|
||||
// might be in the stack.
|
||||
for s := gp.waiting; s != nil; s = s.waitlink {
|
||||
adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
|
||||
adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu))
|
||||
adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -834,7 +835,7 @@ func fillstack(stk stack, b byte) {
|
|||
func findsghi(gp *g, stk stack) uintptr {
|
||||
var sghi uintptr
|
||||
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
|
||||
p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
|
||||
p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize)
|
||||
if stk.lo <= p && p < stk.hi && p > sghi {
|
||||
sghi = p
|
||||
}
|
||||
|
@ -853,7 +854,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
|
|||
// Lock channels to prevent concurrent send/receive.
|
||||
var lastc *hchan
|
||||
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
|
||||
if sg.c != lastc {
|
||||
if sg.c.get() != lastc {
|
||||
// There is a ranking cycle here between gscan bit and
|
||||
// hchan locks. Normally, we only allow acquiring hchan
|
||||
// locks and then getting a gscan bit. In this case, we
|
||||
|
@ -863,9 +864,9 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
|
|||
// suspended. So, we get a special hchan lock rank here
|
||||
// that is lower than gscan, but doesn't allow acquiring
|
||||
// any other locks other than hchan.
|
||||
lockWithRank(&sg.c.lock, lockRankHchanLeaf)
|
||||
lockWithRank(&sg.c.get().lock, lockRankHchanLeaf)
|
||||
}
|
||||
lastc = sg.c
|
||||
lastc = sg.c.get()
|
||||
}
|
||||
|
||||
// Adjust sudogs.
|
||||
|
@ -885,10 +886,10 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
|
|||
// Unlock channels.
|
||||
lastc = nil
|
||||
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
|
||||
if sg.c != lastc {
|
||||
unlock(&sg.c.lock)
|
||||
if sg.c.get() != lastc {
|
||||
unlock(&sg.c.get().lock)
|
||||
}
|
||||
lastc = sg.c
|
||||
lastc = sg.c.get()
|
||||
}
|
||||
|
||||
return sgsize
|
||||
|
|
277
src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go
vendored
Normal file
|
@ -0,0 +1,277 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Common goroutine leak patterns. Extracted from:
|
||||
// "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach"
|
||||
// doi:10.1109/CGO57630.2024.10444835
|
||||
//
|
||||
// Tests in this file are not flaky if and only if they are run with GOMAXPROCS=1.
|
||||
// The main goroutine forcefully yields via `runtime.Gosched()` before
|
||||
// running the profiler. This moves it to the back of the run queue,
|
||||
// allowing the leaky goroutines to be scheduled beforehand and get stuck.
|
||||
|
||||
func init() {
|
||||
register("NoCloseRange", NoCloseRange)
|
||||
register("MethodContractViolation", MethodContractViolation)
|
||||
register("DoubleSend", DoubleSend)
|
||||
register("EarlyReturn", EarlyReturn)
|
||||
register("NCastLeak", NCastLeak)
|
||||
register("Timeout", Timeout)
|
||||
}
|
||||
|
||||
// Incoming list of items and the number of workers.
|
||||
func noCloseRange(list []any, workers int) {
|
||||
ch := make(chan any)
|
||||
|
||||
// Create each worker
|
||||
for i := 0; i < workers; i++ {
|
||||
go func() {
|
||||
|
||||
// Each worker waits for an item and processes it.
|
||||
for item := range ch {
|
||||
// Process each item
|
||||
_ = item
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send each item to one of the workers.
|
||||
for _, item := range list {
|
||||
// Sending can leak if workers == 0 or if one of the workers panics
|
||||
ch <- item
|
||||
}
|
||||
// The channel is never closed, so workers leak once there are no more
|
||||
// items left to process.
|
||||
}
|
||||
|
||||
func NoCloseRange() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
go noCloseRange([]any{1, 2, 3}, 0)
|
||||
go noCloseRange([]any{1, 2, 3}, 3)
|
||||
}
|
||||
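For contrast with the leak above, a sketch of the conventional repair: close the channel once all items are sent, so each worker's range loop terminates. (Illustrative only; the test must keep the leak. Note that the sends would still block when workers == 0, which is a separate bug.)

func noCloseRangeFixed(list []any, workers int) {
	ch := make(chan any)
	for i := 0; i < workers; i++ {
		go func() {
			for item := range ch { // loop ends once ch is closed
				_ = item // process each item
			}
		}()
	}
	for _, item := range list {
		ch <- item
	}
	close(ch) // lets every worker drain the queue and return
}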
|
||||
// A worker processes items pushed to `ch` one by one in the background.
|
||||
// When the worker is no longer needed, it must be closed with `Stop`.
|
||||
//
|
||||
// Specifications:
|
||||
//
|
||||
// A worker may be started any number of times, but must be stopped only once.
|
||||
// Stopping a worker multiple times will lead to a close panic.
|
||||
// Any worker that is started must eventually be stopped.
|
||||
// Failing to stop a worker results in a goroutine leak.
|
||||
type worker struct {
|
||||
ch chan any
|
||||
done chan any
|
||||
}
|
||||
|
||||
// Start spawns a background goroutine that extracts items pushed to the queue.
|
||||
func (w worker) Start() {
|
||||
go func() {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-w.ch: // Normal workflow
|
||||
case <-w.done:
|
||||
return // Shut down
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (w worker) Stop() {
|
||||
// Allows goroutine created by Start to terminate
|
||||
close(w.done)
|
||||
}
|
||||
|
||||
func (w worker) AddToQueue(item any) {
|
||||
w.ch <- item
|
||||
}
|
||||
|
||||
// worker limited in scope by workerLifecycle
|
||||
func workerLifecycle(items []any) {
|
||||
// Create a new worker
|
||||
w := worker{
|
||||
ch: make(chan any),
|
||||
done: make(chan any),
|
||||
}
|
||||
// Start worker
|
||||
w.Start()
|
||||
|
||||
// Operate on worker
|
||||
for _, item := range items {
|
||||
w.AddToQueue(item)
|
||||
}
|
||||
|
||||
runtime.Gosched()
|
||||
// Exits without calling `Stop`. The goroutine created by `Start` eventually leaks.
|
||||
}
|
||||
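Per the contract above, the minimal repair is to pair the Start with exactly one Stop; a sketch (not part of the test, which deliberately omits it):

func workerLifecycleFixed(items []any) {
	w := worker{
		ch:   make(chan any),
		done: make(chan any),
	}
	w.Start()
	defer w.Stop() // releases the goroutine spawned by Start, exactly once
	for _, item := range items {
		w.AddToQueue(item)
	}
}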
|
||||
func MethodContractViolation() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
runtime.Gosched()
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
workerLifecycle(make([]any, 10))
|
||||
runtime.Gosched()
|
||||
}
|
||||
|
||||
// doubleSend must send a message on the incoming channel (the incoming error simulates an error generated internally).
|
||||
func doubleSend(ch chan any, err error) {
|
||||
if err != nil {
|
||||
// In case of an error, send nil.
|
||||
ch <- nil
|
||||
// Return is missing here.
|
||||
}
|
||||
// Otherwise, continue with normal behaviour
|
||||
// This send is still executed in the error case, which may lead to a goroutine leak.
|
||||
ch <- struct{}{}
|
||||
}
|
||||
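A sketch of the fix the comment points at: return after the error send, so the second send can never fire in the error path (illustrative; the test keeps the bug on purpose):

func doubleSendFixed(ch chan any, err error) {
	if err != nil {
		ch <- nil
		return // the return the buggy version is missing
	}
	ch <- struct{}{} // now reachable only in the non-error case
}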
|
||||
func DoubleSend() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
ch := make(chan any)
|
||||
defer func() {
|
||||
runtime.Gosched()
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
doubleSend(ch, nil)
|
||||
}()
|
||||
<-ch
|
||||
|
||||
go func() {
|
||||
doubleSend(ch, fmt.Errorf("error"))
|
||||
}()
|
||||
<-ch
|
||||
|
||||
ch1 := make(chan any, 1)
|
||||
go func() {
|
||||
doubleSend(ch1, fmt.Errorf("error"))
|
||||
}()
|
||||
<-ch1
|
||||
}
|
||||
|
||||
// earlyReturn demonstrates a common pattern of goroutine leaks.
|
||||
// A return statement interrupts the evaluation of the parent goroutine before it can consume a message.
|
||||
// Incoming error simulates an error produced internally.
|
||||
func earlyReturn(err error) {
|
||||
// Create a synchronous channel
|
||||
ch := make(chan any)
|
||||
|
||||
go func() {
|
||||
|
||||
// Send something to the channel.
|
||||
// Leaks if the parent goroutine terminates early.
|
||||
ch <- struct{}{}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
// Interrupt evaluation of parent early in case of error.
|
||||
// Sender leaks.
|
||||
return
|
||||
}
|
||||
|
||||
// Only receive if there is no error.
|
||||
<-ch
|
||||
}
|
||||
|
||||
func EarlyReturn() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
runtime.Gosched()
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
go earlyReturn(fmt.Errorf("error"))
|
||||
}
|
||||
|
||||
// nCastLeak processes a number of items. The first result past the post is retrieved from the channel.
|
||||
func nCastLeak(items []any) {
|
||||
// Channel is synchronous.
|
||||
ch := make(chan any)
|
||||
|
||||
// Iterate over every item
|
||||
for range items {
|
||||
go func() {
|
||||
|
||||
// Process item and send result to channel
|
||||
ch <- struct{}{}
|
||||
// Channel is synchronous: only one sender will synchronise
|
||||
}()
|
||||
}
|
||||
// Retrieve first result. All other senders block.
|
||||
// Receiver blocks if there are no senders.
|
||||
<-ch
|
||||
}
|
||||
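The usual repair for this first-past-the-post pattern is to size the channel buffer to the number of senders, so the losing senders complete immediately and exit; a sketch (illustrative only):

func nCastFixed(items []any) {
	ch := make(chan any, len(items)) // one buffer slot per sender
	for range items {
		go func() {
			ch <- struct{}{} // never blocks: the buffer has room
		}()
	}
	if len(items) > 0 {
		<-ch // take the winner; the rest land in the buffer
	}
}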
|
||||
func NCastLeak() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
for i := 0; i < yieldCount; i++ {
|
||||
// Yield enough times to allow all the leaky goroutines to
|
||||
// reach the execution point.
|
||||
runtime.Gosched()
|
||||
}
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
nCastLeak(nil)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
nCastLeak(make([]any, 5))
|
||||
}()
|
||||
}
|
||||
|
||||
// A context is provided to short-circuit evaluation, leading
|
||||
// the sender goroutine to leak.
|
||||
func timeout(ctx context.Context) {
|
||||
ch := make(chan any)
|
||||
|
||||
go func() {
|
||||
ch <- struct{}{}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ch: // Receive message
|
||||
// Sender is released
|
||||
case <-ctx.Done(): // Context was cancelled or timed out
|
||||
// Sender is leaked
|
||||
}
|
||||
}
|
||||
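The textbook repair here gives the channel a one-slot buffer, so the sender completes even when the select takes the ctx.Done() branch; a sketch (illustrative only):

func timeoutFixed(ctx context.Context) {
	ch := make(chan any, 1)
	go func() {
		ch <- struct{}{} // never blocks: the buffer has room
	}()
	select {
	case <-ch: // result consumed
	case <-ctx.Done(): // result abandoned in the buffer; sender still exits
	}
}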
|
||||
func Timeout() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
runtime.Gosched()
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
go timeout(ctx)
|
||||
}
|
||||
}
|
21
src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
Copyright © 2021 Institute of Computing Technology, University of New South Wales
|
||||
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the “Software”),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
1847
src/runtime/testdata/testgoroutineleakprofile/goker/README.md
vendored
Normal file
File diff suppressed because it is too large
145
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
* Project: cockroach
|
||||
* Issue or PR : https://github.com/cockroachdb/cockroach/pull/10214
|
||||
* Buggy version: 7207111aa3a43df0552509365fdec741a53f873f
|
||||
* fix commit-id: 27e863d90ab0660494778f1c35966cc5ddc38e32
|
||||
* Flaky: 3/100
|
||||
* Description: This goroutine leak is caused by acquiring coalescedMu.Lock()
|
||||
* and raftMu.Lock() in different orders. The fix is to refactor sendQueuedHeartbeats()
|
||||
* so that cockroachdb unlocks coalescedMu before locking raftMu.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach10214", Cockroach10214)
|
||||
}
|
||||
|
||||
type Store_cockroach10214 struct {
|
||||
coalescedMu struct {
|
||||
sync.Mutex // L1
|
||||
heartbeatResponses []int
|
||||
}
|
||||
mu struct {
|
||||
replicas map[int]*Replica_cockroach10214
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store_cockroach10214) sendQueuedHeartbeats() {
|
||||
s.coalescedMu.Lock() // L1 acquire
|
||||
defer s.coalescedMu.Unlock() // L1 release
|
||||
for i := 0; i < len(s.coalescedMu.heartbeatResponses); i++ {
|
||||
s.sendQueuedHeartbeatsToNode() // L2
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store_cockroach10214) sendQueuedHeartbeatsToNode() {
|
||||
for i := 0; i < len(s.mu.replicas); i++ {
|
||||
r := s.mu.replicas[i]
|
||||
r.reportUnreachable() // L2
|
||||
}
|
||||
}
|
||||
|
||||
type Replica_cockroach10214 struct {
|
||||
raftMu sync.Mutex // L2
|
||||
mu sync.Mutex // L3
|
||||
store *Store_cockroach10214
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10214) reportUnreachable() {
|
||||
r.raftMu.Lock() // L2 acquire
|
||||
time.Sleep(time.Millisecond)
|
||||
defer r.raftMu.Unlock() // L2 release
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10214) tick() {
|
||||
r.raftMu.Lock() // L2 acquire
|
||||
defer r.raftMu.Unlock() // L2 release
|
||||
r.tickRaftMuLocked()
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10214) tickRaftMuLocked() {
|
||||
r.mu.Lock() // L3 acquire
|
||||
defer r.mu.Unlock() // L3 release
|
||||
if r.maybeQuiesceLocked() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10214) maybeQuiesceLocked() bool {
|
||||
for i := 0; i < 2; i++ {
|
||||
if !r.maybeCoalesceHeartbeat() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10214) maybeCoalesceHeartbeat() bool {
|
||||
msgtype := uintptr(unsafe.Pointer(r)) % 3
|
||||
switch msgtype {
|
||||
case 0, 1, 2:
|
||||
r.store.coalescedMu.Lock() // L1 acquire
|
||||
default:
|
||||
return false
|
||||
}
|
||||
r.store.coalescedMu.Unlock() // L1 release
|
||||
return true
|
||||
}
|
||||
|
||||
func Cockroach10214() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
for i := 0; i < 1000; i++ {
|
||||
go func() {
|
||||
store := &Store_cockroach10214{}
|
||||
responses := &store.coalescedMu.heartbeatResponses
|
||||
*responses = append(*responses, 1, 2)
|
||||
store.mu.replicas = make(map[int]*Replica_cockroach10214)
|
||||
|
||||
rp1 := &Replica_cockroach10214{ // L2,3[0]
|
||||
store: store,
|
||||
}
|
||||
rp2 := &Replica_cockroach10214{ // L2,3[1]
|
||||
store: store,
|
||||
}
|
||||
store.mu.replicas[0] = rp1
|
||||
store.mu.replicas[1] = rp2
|
||||
|
||||
go store.sendQueuedHeartbeats() // G1
|
||||
go rp1.tick() // G2
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Example of goroutine leak trace:
|
||||
//
|
||||
// G1 G2
|
||||
//------------------------------------------------------------------------------------
|
||||
// s.sendQueuedHeartbeats() .
|
||||
// s.coalescedMu.Lock() [L1] .
|
||||
// s.sendQueuedHeartbeatsToNode() .
|
||||
// s.mu.replicas[0].reportUnreachable() .
|
||||
// s.mu.replicas[0].raftMu.Lock() [L2] .
|
||||
// . s.mu.replicas[0].tick()
|
||||
// . s.mu.replicas[0].raftMu.Lock() [L2]
|
||||
// . s.mu.replicas[0].tickRaftMuLocked()
|
||||
// . s.mu.replicas[0].mu.Lock() [L3]
|
||||
// . s.mu.replicas[0].maybeQuiesceLocked()
|
||||
// . s.mu.replicas[0].maybeCoalesceHeartbeat()
|
||||
// . s.coalescedMu.Lock() [L1]
|
||||
//--------------------------------G1,G2 leak------------------------------------------
|
115
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach1055", Cockroach1055)
|
||||
}
|
||||
|
||||
type Stopper_cockroach1055 struct {
|
||||
stopper chan struct{}
|
||||
stop sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
draining int32
|
||||
drain sync.WaitGroup
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) AddWorker() {
|
||||
s.stop.Add(1)
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) ShouldStop() <-chan struct{} {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.stopper
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) SetStopped() {
|
||||
if s != nil {
|
||||
s.stop.Done()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) Quiesce() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.draining = 1
|
||||
s.drain.Wait()
|
||||
s.draining = 0
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) Stop() {
|
||||
s.mu.Lock() // L1
|
||||
defer s.mu.Unlock()
|
||||
atomic.StoreInt32(&s.draining, 1)
|
||||
s.drain.Wait()
|
||||
close(s.stopper)
|
||||
s.stop.Wait()
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1055) StartTask() bool {
|
||||
if atomic.LoadInt32(&s.draining) == 0 {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.drain.Add(1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func NewStopper_cockroach1055() *Stopper_cockroach1055 {
|
||||
return &Stopper_cockroach1055{
|
||||
stopper: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func Cockroach1055() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
for i := 0; i <= 1000; i++ {
|
||||
go func() { // G1
|
||||
var stoppers []*Stopper_cockroach1055
|
||||
for i := 0; i < 2; i++ {
|
||||
stoppers = append(stoppers, NewStopper_cockroach1055())
|
||||
}
|
||||
|
||||
for i := range stoppers {
|
||||
s := stoppers[i]
|
||||
s.AddWorker()
|
||||
go func() { // G2
|
||||
s.StartTask()
|
||||
<-s.ShouldStop()
|
||||
s.SetStopped()
|
||||
}()
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() { // G3
|
||||
for _, s := range stoppers {
|
||||
s.Quiesce()
|
||||
}
|
||||
for _, s := range stoppers {
|
||||
s.Stop()
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
|
||||
<-done
|
||||
}()
|
||||
}
|
||||
}
|
98
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go
vendored
Normal file
|
@ -0,0 +1,98 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
* Project: cockroach
|
||||
* Issue or PR : https://github.com/cockroachdb/cockroach/pull/10790
|
||||
* Buggy version: 96b5452557ebe26bd9d85fe7905155009204d893
|
||||
* fix commit-id: f1a5c19125c65129b966fbdc0e6408e8df214aba
|
||||
* Flaky: 28/100
|
||||
* Description:
|
||||
* It is possible that a message from ctxDone will make the function beginCmds
|
||||
* return without draining the channel ch, so that goroutines created by the
|
||||
* anonymous function will leak.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach10790", Cockroach10790)
|
||||
}
|
||||
|
||||
type Replica_cockroach10790 struct {
|
||||
chans []chan bool
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10790) beginCmds(ctx context.Context) {
|
||||
ctxDone := ctx.Done()
|
||||
for _, ch := range r.chans {
|
||||
select {
|
||||
case <-ch:
|
||||
case <-ctxDone:
|
||||
go func() { // G3
|
||||
for _, ch := range r.chans {
|
||||
<-ch
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Replica_cockroach10790) sendChans(ctx context.Context) {
|
||||
for _, ch := range r.chans {
|
||||
select {
|
||||
case ch <- true:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewReplica_cockroach10790() *Replica_cockroach10790 {
|
||||
r := &Replica_cockroach10790{}
|
||||
r.chans = append(r.chans, make(chan bool), make(chan bool))
|
||||
return r
|
||||
}
|
||||
|
||||
// Example of goroutine leak trace:
|
||||
//
|
||||
// G1 G2 G3 helper goroutine
|
||||
//--------------------------------------------------------------------------------------
|
||||
// . . r.sendChans()
|
||||
// r.beginCmds() . .
|
||||
// . . ch1 <-
|
||||
// <-ch1 <================================================> ch1 <-
|
||||
// . . select [ch2<-, <-ctx.Done()]
|
||||
// . cancel() .
|
||||
// . <<done>> [<-ctx.Done()] ==> return
|
||||
// . <<done>>
|
||||
// go func() [G3] .
|
||||
// . <-ch1
|
||||
// ------------------------------G3 leaks----------------------------------------------
|
||||
//
|
||||
|
||||
func Cockroach10790() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
r := NewReplica_cockroach10790()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go r.sendChans(ctx) // helper goroutine
|
||||
go r.beginCmds(ctx) // G1
|
||||
go cancel() // G2
|
||||
}()
|
||||
}
|
||||
}
|
82
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
* Project: cockroach
|
||||
* Issue or PR : https://github.com/cockroachdb/cockroach/pull/13197
|
||||
* Buggy version: fff27aedabafe20cef57f75905fe340cab48c2a4
|
||||
* fix commit-id: 9bf770cd8f6eaff5441b80d3aec1a5614e8747e1
|
||||
* Flaky: 100/100
|
||||
* Description: One goroutine executing (*Tx).awaitDone() blocks
|
||||
* waiting for a signal over context.Done() that never comes.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach13197", Cockroach13197)
|
||||
}
|
||||
|
||||
type DB_cockroach13197 struct{}
|
||||
|
||||
func (db *DB_cockroach13197) begin(ctx context.Context) *Tx_cockroach13197 {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
tx := &Tx_cockroach13197{
|
||||
cancel: cancel,
|
||||
ctx: ctx,
|
||||
}
|
||||
go tx.awaitDone() // G2
|
||||
return tx
|
||||
}
|
||||
|
||||
type Tx_cockroach13197 struct {
|
||||
cancel context.CancelFunc
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (tx *Tx_cockroach13197) awaitDone() {
|
||||
<-tx.ctx.Done()
|
||||
}
|
||||
|
||||
func (tx *Tx_cockroach13197) Rollback() {
|
||||
tx.rollback()
|
||||
}
|
||||
|
||||
func (tx *Tx_cockroach13197) rollback() {
|
||||
tx.close()
|
||||
}
|
||||
|
||||
func (tx *Tx_cockroach13197) close() {
|
||||
tx.cancel()
|
||||
}
|
||||
|
||||
// Example of goroutine leak trace:
|
||||
//
|
||||
// G1 G2
|
||||
//--------------------------------
|
||||
// begin()
|
||||
// . awaitDone()
|
||||
// <<done>> .
|
||||
// <-tx.ctx.Done()
|
||||
//------------G2 leak-------------
|
||||
|
||||
func Cockroach13197() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
// Yield several times to allow the child goroutine to run.
|
||||
for i := 0; i < yieldCount; i++ {
|
||||
runtime.Gosched()
|
||||
}
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
db := &DB_cockroach13197{}
|
||||
db.begin(context.Background()) // G1
|
||||
}
|
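For contrast, the leak in this reproducer disappears as soon as the transaction is finished, because Rollback cancels the context that awaitDone is parked on; a sketch reusing the types above (illustrative, not part of the test):

func cockroach13197Fixed() {
	db := &DB_cockroach13197{}
	tx := db.begin(context.Background()) // spawns awaitDone (G2)
	tx.Rollback()                        // close -> cancel fires ctx.Done(); G2 returns
}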
66
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
* Project: cockroach
|
||||
* Issue or PR : https://github.com/cockroachdb/cockroach/pull/13755
|
||||
* Buggy version: 7acb881bbb8f23e87b69fce9568d9a3316b5259c
|
||||
* fix commit-id: ef906076adc1d0e3721944829cfedfed51810088
|
||||
* Flaky: 100/100
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach13755", Cockroach13755)
|
||||
}
|
||||
|
||||
type Rows_cockroach13755 struct {
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (rs *Rows_cockroach13755) initContextClose(ctx context.Context) {
|
||||
ctx, rs.cancel = context.WithCancel(ctx)
|
||||
go rs.awaitDone(ctx)
|
||||
}
|
||||
|
||||
func (rs *Rows_cockroach13755) awaitDone(ctx context.Context) {
|
||||
<-ctx.Done()
|
||||
rs.close(ctx.Err())
|
||||
}
|
||||
|
||||
func (rs *Rows_cockroach13755) close(err error) {
|
||||
rs.cancel()
|
||||
}
|
||||
|
||||
// Example of goroutine leak trace:
|
||||
//
|
||||
// G1 G2
|
||||
//----------------------------------------
|
||||
// initContextClose()
|
||||
// . awaitDone()
|
||||
// <<done>> .
|
||||
// <-tx.ctx.Done()
|
||||
//----------------G2 leak-----------------
|
||||
|
||||
func Cockroach13755() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
// Yield several times to allow the child goroutine to run.
|
||||
for i := 0; i < yieldCount; i++ {
|
||||
runtime.Gosched()
|
||||
}
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
rs := &Rows_cockroach13755{}
|
||||
rs.initContextClose(context.Background())
|
||||
}
|
167
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go
vendored
Normal file
|
@ -0,0 +1,167 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach1462", Cockroach1462)
|
||||
}
|
||||
|
||||
type Stopper_cockroach1462 struct {
|
||||
stopper chan struct{}
|
||||
stopped chan struct{}
|
||||
stop sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
drain *sync.Cond
|
||||
draining bool
|
||||
numTasks int
|
||||
}
|
||||
|
||||
func NewStopper_cockroach1462() *Stopper_cockroach1462 {
|
||||
s := &Stopper_cockroach1462{
|
||||
stopper: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
}
|
||||
s.drain = sync.NewCond(&s.mu)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1462) RunWorker(f func()) {
|
||||
s.AddWorker()
|
||||
go func() { // G2, G3
|
||||
defer s.SetStopped()
|
||||
f()
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1462) AddWorker() {
|
||||
s.stop.Add(1)
|
||||
}
|
||||
func (s *Stopper_cockroach1462) StartTask() bool {
|
||||
s.mu.Lock()
|
||||
runtime.Gosched()
|
||||
defer s.mu.Unlock()
|
||||
if s.draining {
|
||||
return false
|
||||
}
|
||||
s.numTasks++
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1462) FinishTask() {
|
||||
s.mu.Lock()
|
||||
runtime.Gosched()
|
||||
defer s.mu.Unlock()
|
||||
s.numTasks--
|
||||
s.drain.Broadcast()
|
||||
}
|
||||
func (s *Stopper_cockroach1462) SetStopped() {
|
||||
if s != nil {
|
||||
s.stop.Done()
|
||||
}
|
||||
}
|
||||
func (s *Stopper_cockroach1462) ShouldStop() <-chan struct{} {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.stopper
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1462) Quiesce() {
|
||||
s.mu.Lock()
|
||||
runtime.Gosched()
|
||||
defer s.mu.Unlock()
|
||||
s.draining = true
|
||||
for s.numTasks > 0 {
|
||||
// Unlock s.mu, wait for the signal, and lock s.mu.
|
||||
s.drain.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stopper_cockroach1462) Stop() {
|
||||
s.Quiesce()
|
||||
close(s.stopper)
|
||||
s.stop.Wait()
|
||||
s.mu.Lock()
|
||||
runtime.Gosched()
|
||||
defer s.mu.Unlock()
|
||||
close(s.stopped)
|
||||
}
|
||||
|
||||
type interceptMessage_cockroach1462 int
|
||||
|
||||
type localInterceptableTransport_cockroach1462 struct {
|
||||
mu sync.Mutex
|
||||
Events chan interceptMessage_cockroach1462
|
||||
stopper *Stopper_cockroach1462
|
||||
}
|
||||
|
||||
func (lt *localInterceptableTransport_cockroach1462) Close() {}
|
||||
|
||||
type Transport_cockroach1462 interface {
|
||||
Close()
|
||||
}
|
||||
|
||||
func NewLocalInterceptableTransport_cockroach1462(stopper *Stopper_cockroach1462) Transport_cockroach1462 {
|
||||
lt := &localInterceptableTransport_cockroach1462{
|
||||
Events: make(chan interceptMessage_cockroach1462),
|
||||
stopper: stopper,
|
||||
}
|
||||
lt.start()
|
||||
return lt
|
||||
}
|
||||
|
||||
func (lt *localInterceptableTransport_cockroach1462) start() {
|
||||
lt.stopper.RunWorker(func() {
|
||||
for {
|
||||
select {
|
||||
case <-lt.stopper.ShouldStop():
|
||||
return
|
||||
default:
|
||||
lt.Events <- interceptMessage_cockroach1462(0)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func processEventsUntil_cockroach1462(ch <-chan interceptMessage_cockroach1462, stopper *Stopper_cockroach1462) {
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-ch:
|
||||
runtime.Gosched()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-stopper.ShouldStop():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Cockroach1462() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(2000 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
for i := 0; i <= 1000; i++ {
|
||||
go func() { // G1
|
||||
stopper := NewStopper_cockroach1462()
|
||||
transport := NewLocalInterceptableTransport_cockroach1462(stopper).(*localInterceptableTransport_cockroach1462)
|
||||
stopper.RunWorker(func() {
|
||||
processEventsUntil_cockroach1462(transport.Events, stopper)
|
||||
})
|
||||
stopper.Stop()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
108
src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go
vendored
Normal file
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
* Project: cockroach
|
||||
* Issue or PR : https://github.com/cockroachdb/cockroach/pull/16167
|
||||
* Buggy version: 36fa784aa846b46c29e077634c4e362635f6e74a
|
||||
* fix commit-id: d064942b067ab84628f79cbfda001fa3138d8d6e
|
||||
* Flaky: 1/100
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("Cockroach16167", Cockroach16167)
|
||||
}
|
||||
|
||||
type PreparedStatements_cockroach16167 struct {
|
||||
session *Session_cockroach16167
|
||||
}
|
||||
|
||||
func (ps PreparedStatements_cockroach16167) New(e *Executor_cockroach16167) {
|
||||
e.Prepare(ps.session)
|
||||
}
|
||||
|
||||
type Session_cockroach16167 struct {
|
||||
PreparedStatements PreparedStatements_cockroach16167
|
||||
}
|
||||
|
||||
func (s *Session_cockroach16167) resetForBatch(e *Executor_cockroach16167) {
|
||||
e.getDatabaseCache()
|
||||
}
|
||||
|
||||
type Executor_cockroach16167 struct {
|
||||
systemConfigCond *sync.Cond
|
||||
systemConfigMu sync.RWMutex // L1
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) Start() {
|
||||
e.updateSystemConfig()
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) execParsed(session *Session_cockroach16167) {
|
||||
e.systemConfigCond.L.Lock() // Same as e.systemConfigMu.RLock()
|
||||
runtime.Gosched()
|
||||
defer e.systemConfigCond.L.Unlock()
|
||||
runTxnAttempt_cockroach16167(e, session)
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) execStmtsInCurrentTxn(session *Session_cockroach16167) {
|
||||
e.execStmtInOpenTxn(session)
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) execStmtInOpenTxn(session *Session_cockroach16167) {
|
||||
session.PreparedStatements.New(e)
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) Prepare(session *Session_cockroach16167) {
|
||||
session.resetForBatch(e)
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) getDatabaseCache() {
|
||||
e.systemConfigMu.RLock()
|
||||
defer e.systemConfigMu.RUnlock()
|
||||
}
|
||||
|
||||
func (e *Executor_cockroach16167) updateSystemConfig() {
|
||||
e.systemConfigMu.Lock()
|
||||
runtime.Gosched()
|
||||
defer e.systemConfigMu.Unlock()
|
||||
}
|
||||
|
||||
func runTxnAttempt_cockroach16167(e *Executor_cockroach16167, session *Session_cockroach16167) {
|
||||
e.execStmtsInCurrentTxn(session)
|
||||
}
|
||||
|
||||
func NewExecutorAndSession_cockroach16167() (*Executor_cockroach16167, *Session_cockroach16167) {
|
||||
session := &Session_cockroach16167{}
|
||||
session.PreparedStatements = PreparedStatements_cockroach16167{session}
|
||||
e := &Executor_cockroach16167{}
|
||||
return e, session
|
||||
}
|
||||
|
||||
func Cockroach16167() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() { // G1
|
||||
e, s := NewExecutorAndSession_cockroach16167()
|
||||
e.systemConfigCond = sync.NewCond(e.systemConfigMu.RLocker())
|
||||
go e.Start() // G2
|
||||
e.execParsed(s)
|
||||
}()
|
||||
}
|
||||
}
|
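
Each of these vendored repro programs follows the same shape: a known leaky interleaving is driven many times, the final deferred block gives the leaked goroutines time to park, and the "goroutineleak" profile is written to stdout at debug level 2. The bug above is an instance of the documented sync.RWMutex rule that a reader must not re-acquire the read lock while a writer may be queued. A minimal, self-contained sketch of that cycle, with illustrative names and not part of this commit's test data:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.RWMutex
	var ready sync.WaitGroup
	ready.Add(1)

	go func() { // G1: reader that re-enters the read lock
		mu.RLock() // first read lock
		ready.Done()
		time.Sleep(10 * time.Millisecond) // give the writer time to queue
		mu.RLock()                        // nested read lock: parks behind the queued writer
		mu.RUnlock()
		mu.RUnlock()
	}()

	ready.Wait()
	go func() { // G2: writer
		mu.Lock() // queues behind G1's first RLock; blocks G1's nested RLock
		mu.Unlock()
	}()

	time.Sleep(100 * time.Millisecond)
	fmt.Println("G1 and G2 are very likely deadlocked; a goroutine leak profile would report both")
}
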
60 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go vendored Normal file
@@ -0,0 +1,60 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: cockroach
 * Issue or PR : https://github.com/cockroachdb/cockroach/pull/18101
 * Buggy version: f7a8e2f57b6bcf00b9abaf3da00598e4acd3a57f
 * fix commit-id: 822bd176cc725c6b50905ea615023200b395e14f
 * Flaky: 100/100
 */

package main

import (
	"context"
	"os"
	"runtime/pprof"
	"time"
)

func init() {
	register("Cockroach18101", Cockroach18101)
}

const chanSize_cockroach18101 = 6

func restore_cockroach18101(ctx context.Context) bool {
	readyForImportCh := make(chan bool, chanSize_cockroach18101)
	go func() { // G2
		defer close(readyForImportCh)
		splitAndScatter_cockroach18101(ctx, readyForImportCh)
	}()
	for readyForImportSpan := range readyForImportCh {
		select {
		case <-ctx.Done():
			return readyForImportSpan
		}
	}
	return true
}

func splitAndScatter_cockroach18101(ctx context.Context, readyForImportCh chan bool) {
	for i := 0; i < chanSize_cockroach18101+2; i++ {
		readyForImportCh <- (false || i != 0)
	}
}

func Cockroach18101() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		ctx, cancel := context.WithCancel(context.Background())
		go restore_cockroach18101(ctx) // G1
		go cancel()                    // helper goroutine
	}
}
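
The leak above strands the splitAndScatter sender: the consumer returns on ctx.Done() without draining the buffered channel. A sketch of the usual repair, with illustrative names and assumed structure, is a producer whose every send also races a cancellation branch:

package main

import (
	"context"
	"fmt"
)

func produce(ctx context.Context, out chan<- int) {
	defer close(out)
	for i := 0; i < 10; i++ {
		select {
		case out <- i: // normal path
		case <-ctx.Done(): // consumer gave up; exit instead of leaking
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	out := make(chan int, 2)
	go produce(ctx, out)
	fmt.Println(<-out) // consume one value, then give up
	cancel()           // producer takes the ctx.Done branch instead of parking
	for range out {    // drain until the producer closes the channel
	}
}
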
125 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go vendored Normal file
@@ -0,0 +1,125 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func init() {
	register("Cockroach2448", Cockroach2448)
}

type Stopper_cockroach2448 struct {
	Done chan bool
}

func (s *Stopper_cockroach2448) ShouldStop() <-chan bool {
	return s.Done
}

type EventMembershipChangeCommitted_cockroach2448 struct {
	Callback func()
}

type MultiRaft_cockroach2448 struct {
	stopper      *Stopper_cockroach2448
	Events       chan interface{}
	callbackChan chan func()
}

// sendEvent can be invoked many times
func (m *MultiRaft_cockroach2448) sendEvent(event interface{}) {
	select {
	case m.Events <- event: // Waiting for events consumption
	case <-m.stopper.ShouldStop():
	}
}

type state_cockroach2448 struct {
	*MultiRaft_cockroach2448
}

func (s *state_cockroach2448) start() {
	for {
		select {
		case <-s.stopper.ShouldStop():
			return
		case cb := <-s.callbackChan:
			cb()
		default:
			s.handleWriteResponse()
			time.Sleep(100 * time.Microsecond)
		}
	}
}

func (s *state_cockroach2448) handleWriteResponse() {
	s.sendEvent(&EventMembershipChangeCommitted_cockroach2448{
		Callback: func() {
			select {
			case s.callbackChan <- func() { // Waiting for callbackChan consumption
				time.Sleep(time.Nanosecond)
			}:
			case <-s.stopper.ShouldStop():
			}
		},
	})
}

type Store_cockroach2448 struct {
	multiraft *MultiRaft_cockroach2448
}

func (s *Store_cockroach2448) processRaft() {
	for {
		select {
		case e := <-s.multiraft.Events:
			switch e := e.(type) {
			case *EventMembershipChangeCommitted_cockroach2448:
				callback := e.Callback
				runtime.Gosched()
				if callback != nil {
					callback() // Waiting for callbackChan consumption
				}
			}
		case <-s.multiraft.stopper.ShouldStop():
			return
		}
	}
}

func NewStoreAndState_cockroach2448() (*Store_cockroach2448, *state_cockroach2448) {
	stopper := &Stopper_cockroach2448{
		Done: make(chan bool),
	}
	mltrft := &MultiRaft_cockroach2448{
		stopper:      stopper,
		Events:       make(chan interface{}),
		callbackChan: make(chan func()),
	}
	st := &state_cockroach2448{mltrft}
	s := &Store_cockroach2448{mltrft}
	return s, st
}

func Cockroach2448() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 1000; i++ {
		go func() {
			s, st := NewStoreAndState_cockroach2448()
			go s.processRaft() // G1
			go st.start()      // G2
		}()
	}
}
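
The event loop and the callback loop above end up blocked on sends to each other, so neither reaches its receive. Distilled to its shape, as an illustrative sketch rather than the commit's code:

package main

import (
	"fmt"
	"time"
)

func main() {
	a := make(chan struct{}) // unbuffered
	b := make(chan struct{}) // unbuffered

	go func() { // G1: send on a, then receive on b
		a <- struct{}{}
		<-b
	}()
	go func() { // G2: send on b, then receive on a
		b <- struct{}{}
		<-a
	}()
	// Each send can only complete once the other goroutine reaches its
	// receive, which sits after its own blocked send: both park forever.
	time.Sleep(100 * time.Millisecond)
	fmt.Println("main exits; G1 and G2 are leaked")
}
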
78 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go vendored Normal file
@@ -0,0 +1,78 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Cockroach24808", Cockroach24808)
}

type Compactor_cockroach24808 struct {
	ch chan struct{}
}

type Stopper_cockroach24808 struct {
	stop    sync.WaitGroup
	stopper chan struct{}
}

func (s *Stopper_cockroach24808) RunWorker(ctx context.Context, f func(context.Context)) {
	s.stop.Add(1)
	go func() {
		defer s.stop.Done()
		f(ctx)
	}()
}

func (s *Stopper_cockroach24808) ShouldStop() <-chan struct{} {
	if s == nil {
		return nil
	}
	return s.stopper
}

func (s *Stopper_cockroach24808) Stop() {
	close(s.stopper)
}

func (c *Compactor_cockroach24808) Start(ctx context.Context, stopper *Stopper_cockroach24808) {
	c.ch <- struct{}{}
	stopper.RunWorker(ctx, func(ctx context.Context) {
		for {
			select {
			case <-stopper.ShouldStop():
				return
			case <-c.ch:
			}
		}
	})
}

func Cockroach24808() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() { // G1
		stopper := &Stopper_cockroach24808{stopper: make(chan struct{})}
		defer stopper.Stop()

		compactor := &Compactor_cockroach24808{ch: make(chan struct{}, 1)}
		compactor.ch <- struct{}{}

		compactor.Start(context.Background(), stopper)
	}()
}
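
Here the capacity-1 channel is already full when Start sends again, before any worker exists to receive, so the send parks forever. The shape, distilled into an illustrative sketch:

package main

import "fmt"

func main() {
	ch := make(chan struct{}, 1)
	ch <- struct{}{} // fills the only buffer slot

	done := make(chan struct{})
	go func() {
		defer close(done)
		ch <- struct{}{} // no receiver yet: parks until someone reads
	}()

	<-ch // draining one slot lets the parked send complete
	<-done
	fmt.Println("send completed only because a receiver showed up")
}
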
92 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go vendored Normal file
@@ -0,0 +1,92 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func init() {
	register("Cockroach25456", Cockroach25456)
}

type Stopper_cockroach25456 struct {
	quiescer chan struct{}
}

func (s *Stopper_cockroach25456) ShouldQuiesce() <-chan struct{} {
	if s == nil {
		return nil
	}
	return s.quiescer
}

func NewStopper_cockroach25456() *Stopper_cockroach25456 {
	return &Stopper_cockroach25456{quiescer: make(chan struct{})}
}

type Store_cockroach25456 struct {
	stopper          *Stopper_cockroach25456
	consistencyQueue *consistencyQueue_cockroach25456
}

func (s *Store_cockroach25456) Stopper() *Stopper_cockroach25456 {
	return s.stopper
}

type Replica_cockroach25456 struct {
	store *Store_cockroach25456
}

func NewReplica_cockroach25456(store *Store_cockroach25456) *Replica_cockroach25456 {
	return &Replica_cockroach25456{store: store}
}

type consistencyQueue_cockroach25456 struct{}

func (q *consistencyQueue_cockroach25456) process(repl *Replica_cockroach25456) {
	<-repl.store.Stopper().ShouldQuiesce()
}

func newConsistencyQueue_cockroach25456() *consistencyQueue_cockroach25456 {
	return &consistencyQueue_cockroach25456{}
}

type testContext_cockroach25456 struct {
	store *Store_cockroach25456
	repl  *Replica_cockroach25456
}

func (tc *testContext_cockroach25456) StartWithStoreConfig(stopper *Stopper_cockroach25456) {
	if tc.store == nil {
		tc.store = &Store_cockroach25456{
			consistencyQueue: newConsistencyQueue_cockroach25456(),
		}
	}
	tc.store.stopper = stopper
	tc.repl = NewReplica_cockroach25456(tc.store)
}

func Cockroach25456() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() { // G1
		stopper := NewStopper_cockroach25456()
		tc := testContext_cockroach25456{}
		tc.StartWithStoreConfig(stopper)

		for i := 0; i < 2; i++ {
			tc.store.consistencyQueue.process(tc.repl)
		}
	}()
}
124 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go vendored Normal file
@@ -0,0 +1,124 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"sync/atomic"
)

func init() {
	register("Cockroach35073", Cockroach35073)
}

type ConsumerStatus_cockroach35073 uint32

const (
	NeedMoreRows_cockroach35073 ConsumerStatus_cockroach35073 = iota
	DrainRequested_cockroach35073
	ConsumerClosed_cockroach35073
)

const rowChannelBufSize_cockroach35073 = 16
const outboxBufRows_cockroach35073 = 16

type rowSourceBase_cockroach35073 struct {
	consumerStatus ConsumerStatus_cockroach35073
}

func (rb *rowSourceBase_cockroach35073) consumerClosed() {
	atomic.StoreUint32((*uint32)(&rb.consumerStatus), uint32(ConsumerClosed_cockroach35073))
}

type RowChannelMsg_cockroach35073 int

type RowChannel_cockroach35073 struct {
	rowSourceBase_cockroach35073
	dataChan chan RowChannelMsg_cockroach35073
}

func (rc *RowChannel_cockroach35073) ConsumerClosed() {
	rc.consumerClosed()
	select {
	case <-rc.dataChan:
	default:
	}
}

func (rc *RowChannel_cockroach35073) Push() ConsumerStatus_cockroach35073 {
	consumerStatus := ConsumerStatus_cockroach35073(
		atomic.LoadUint32((*uint32)(&rc.consumerStatus)))
	switch consumerStatus {
	case NeedMoreRows_cockroach35073:
		rc.dataChan <- RowChannelMsg_cockroach35073(0)
	case DrainRequested_cockroach35073:
	case ConsumerClosed_cockroach35073:
	}
	return consumerStatus
}

func (rc *RowChannel_cockroach35073) InitWithNumSenders() {
	rc.initWithBufSizeAndNumSenders(rowChannelBufSize_cockroach35073)
}

func (rc *RowChannel_cockroach35073) initWithBufSizeAndNumSenders(chanBufSize int) {
	rc.dataChan = make(chan RowChannelMsg_cockroach35073, chanBufSize)
}

type outbox_cockroach35073 struct {
	RowChannel_cockroach35073
}

func (m *outbox_cockroach35073) init() {
	m.RowChannel_cockroach35073.InitWithNumSenders()
}

func (m *outbox_cockroach35073) start(wg *sync.WaitGroup) {
	if wg != nil {
		wg.Add(1)
	}
	go m.run(wg)
}

func (m *outbox_cockroach35073) run(wg *sync.WaitGroup) {
	if wg != nil {
		wg.Done()
	}
}

func Cockroach35073() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		outbox := &outbox_cockroach35073{}
		outbox.init()

		var wg sync.WaitGroup
		for i := 0; i < outboxBufRows_cockroach35073; i++ {
			outbox.Push()
		}

		var blockedPusherWg sync.WaitGroup
		blockedPusherWg.Add(1)
		go func() {
			outbox.Push()
			blockedPusherWg.Done()
		}()

		outbox.start(&wg)

		wg.Wait()
		outbox.RowChannel_cockroach35073.Push()
	}()
}
135 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go vendored Normal file
@@ -0,0 +1,135 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Cockroach35931", Cockroach35931)
}

type RowReceiver_cockroach35931 interface {
	Push()
}

type inboundStreamInfo_cockroach35931 struct {
	receiver RowReceiver_cockroach35931
}

type RowChannel_cockroach35931 struct {
	dataChan chan struct{}
}

func (rc *RowChannel_cockroach35931) Push() {
	// The buffer size can be either 0 or 1 when this function is entered.
	// We need context sensitivity or a path-condition on the buffer size
	// to find this bug.
	rc.dataChan <- struct{}{}
}

func (rc *RowChannel_cockroach35931) initWithBufSizeAndNumSenders(chanBufSize int) {
	rc.dataChan = make(chan struct{}, chanBufSize)
}

type flowEntry_cockroach35931 struct {
	flow           *Flow_cockroach35931
	inboundStreams map[int]*inboundStreamInfo_cockroach35931
}

type flowRegistry_cockroach35931 struct {
	sync.Mutex
	flows map[int]*flowEntry_cockroach35931
}

func (fr *flowRegistry_cockroach35931) getEntryLocked(id int) *flowEntry_cockroach35931 {
	entry, ok := fr.flows[id]
	if !ok {
		entry = &flowEntry_cockroach35931{}
		fr.flows[id] = entry
	}
	return entry
}

func (fr *flowRegistry_cockroach35931) cancelPendingStreamsLocked(id int) []RowReceiver_cockroach35931 {
	entry := fr.flows[id]
	pendingReceivers := make([]RowReceiver_cockroach35931, 0)
	for _, is := range entry.inboundStreams {
		pendingReceivers = append(pendingReceivers, is.receiver)
	}
	return pendingReceivers
}

type Flow_cockroach35931 struct {
	id             int
	flowRegistry   *flowRegistry_cockroach35931
	inboundStreams map[int]*inboundStreamInfo_cockroach35931
}

func (f *Flow_cockroach35931) cancel() {
	f.flowRegistry.Lock()
	timedOutReceivers := f.flowRegistry.cancelPendingStreamsLocked(f.id)
	f.flowRegistry.Unlock()

	for _, receiver := range timedOutReceivers {
		receiver.Push()
	}
}

func (fr *flowRegistry_cockroach35931) RegisterFlow(f *Flow_cockroach35931, inboundStreams map[int]*inboundStreamInfo_cockroach35931) {
	entry := fr.getEntryLocked(f.id)
	entry.flow = f
	entry.inboundStreams = inboundStreams
}

func makeFlowRegistry_cockroach35931() *flowRegistry_cockroach35931 {
	return &flowRegistry_cockroach35931{
		flows: make(map[int]*flowEntry_cockroach35931),
	}
}

func Cockroach35931() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		fr := makeFlowRegistry_cockroach35931()

		left := &RowChannel_cockroach35931{}
		left.initWithBufSizeAndNumSenders(1)
		right := &RowChannel_cockroach35931{}
		right.initWithBufSizeAndNumSenders(1)

		inboundStreams := map[int]*inboundStreamInfo_cockroach35931{
			0: {
				receiver: left,
			},
			1: {
				receiver: right,
			},
		}

		left.Push()

		flow := &Flow_cockroach35931{
			id:             0,
			flowRegistry:   fr,
			inboundStreams: inboundStreams,
		}

		fr.RegisterFlow(flow, inboundStreams)

		flow.cancel()
	}()
}
122 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go vendored Normal file
@@ -0,0 +1,122 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: cockroach
 * Issue or PR : https://github.com/cockroachdb/cockroach/pull/3710
 * Buggy version: 4afdd4860fd7c3bd9e92489f84a95e5cc7d11a0d
 * fix commit-id: cb65190f9caaf464723e7d072b1f1b69a044ef7b
 * Flaky: 2/100
 */

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
	"unsafe"
)

func init() {
	register("Cockroach3710", Cockroach3710)
}

type Store_cockroach3710 struct {
	raftLogQueue *baseQueue
	replicas     map[int]*Replica_cockroach3710

	mu struct {
		sync.RWMutex
	}
}

func (s *Store_cockroach3710) ForceRaftLogScanAndProcess() {
	s.mu.RLock()
	runtime.Gosched()
	for _, r := range s.replicas {
		s.raftLogQueue.MaybeAdd(r)
	}
	s.mu.RUnlock()
}

func (s *Store_cockroach3710) RaftStatus() {
	s.mu.RLock()
	defer s.mu.RUnlock()
}

func (s *Store_cockroach3710) processRaft() {
	go func() {
		for {
			var replicas []*Replica_cockroach3710
			s.mu.Lock()
			for _, r := range s.replicas {
				replicas = append(replicas, r)
			}
			s.mu.Unlock()
			break
		}
	}()
}

type Replica_cockroach3710 struct {
	store *Store_cockroach3710
}

type baseQueue struct {
	sync.Mutex
	impl *raftLogQueue
}

func (bq *baseQueue) MaybeAdd(repl *Replica_cockroach3710) {
	bq.Lock()
	defer bq.Unlock()
	bq.impl.shouldQueue(repl)
}

type raftLogQueue struct{}

func (*raftLogQueue) shouldQueue(r *Replica_cockroach3710) {
	getTruncatableIndexes(r)
}

func getTruncatableIndexes(r *Replica_cockroach3710) {
	r.store.RaftStatus()
}

func NewStore_cockroach3710() *Store_cockroach3710 {
	rlq := &raftLogQueue{}
	bq := &baseQueue{impl: rlq}
	store := &Store_cockroach3710{
		raftLogQueue: bq,
		replicas:     make(map[int]*Replica_cockroach3710),
	}
	r1 := &Replica_cockroach3710{store}
	r2 := &Replica_cockroach3710{store}

	makeKey := func(r *Replica_cockroach3710) int {
		return int((uintptr(unsafe.Pointer(r)) >> 1) % 7)
	}
	store.replicas[makeKey(r1)] = r1
	store.replicas[makeKey(r2)] = r2

	return store
}

func Cockroach3710() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 10000; i++ {
		go func() {
			store := NewStore_cockroach3710()
			go store.ForceRaftLogScanAndProcess() // G1
			go store.processRaft()                // G2
		}()
	}
}
62 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go vendored Normal file
@@ -0,0 +1,62 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Cockroach584", Cockroach584)
}

type gossip_cockroach584 struct {
	mu     sync.Mutex // L1
	closed bool
}

func (g *gossip_cockroach584) bootstrap() {
	for {
		g.mu.Lock()
		if g.closed {
			// Missing g.mu.Unlock
			break
		}
		g.mu.Unlock()
	}
}

func (g *gossip_cockroach584) manage() {
	for {
		g.mu.Lock()
		if g.closed {
			// Missing g.mu.Unlock
			break
		}
		g.mu.Unlock()
	}
}

func Cockroach584() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		for i := 0; i < yieldCount; i++ {
			// Yield several times to allow the child goroutine to run.
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	g := &gossip_cockroach584{
		closed: true,
	}
	go func() { // G1
		g.bootstrap()
		g.manage()
	}()
}
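
bootstrap and manage above break out of their loops still holding g.mu, so whichever runs second blocks forever on Lock. A sketch of the fix shape, with illustrative names: release the lock on every path before leaving the loop.

package main

import "sync"

type gossip struct {
	mu     sync.Mutex
	closed bool
}

func (g *gossip) manage() {
	for {
		g.mu.Lock()
		closed := g.closed
		g.mu.Unlock() // unlock on both paths, then decide
		if closed {
			break
		}
	}
}

func main() {
	g := &gossip{closed: true}
	g.manage()  // returns without holding the mutex
	g.mu.Lock() // would block forever if manage had leaked the lock
	g.mu.Unlock()
}
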
87 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go vendored Normal file
@@ -0,0 +1,87 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: cockroach
 * Issue or PR : https://github.com/cockroachdb/cockroach/pull/6181
 * Buggy version: c0a232b5521565904b851699853bdbd0c670cf1e
 * fix commit-id: d5814e4886a776bf7789b3c51b31f5206480d184
 * Flaky: 57/100
 */

package main

import (
	"io"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Cockroach6181", Cockroach6181)
}

type testDescriptorDB_cockroach6181 struct {
	cache *rangeDescriptorCache_cockroach6181
}

func initTestDescriptorDB_cockroach6181() *testDescriptorDB_cockroach6181 {
	return &testDescriptorDB_cockroach6181{&rangeDescriptorCache_cockroach6181{}}
}

type rangeDescriptorCache_cockroach6181 struct {
	rangeCacheMu sync.RWMutex
}

func (rdc *rangeDescriptorCache_cockroach6181) LookupRangeDescriptor() {
	rdc.rangeCacheMu.RLock()
	runtime.Gosched()
	io.Discard.Write([]byte(rdc.String()))
	rdc.rangeCacheMu.RUnlock()
	rdc.rangeCacheMu.Lock()
	rdc.rangeCacheMu.Unlock()
}

func (rdc *rangeDescriptorCache_cockroach6181) String() string {
	rdc.rangeCacheMu.RLock()
	defer rdc.rangeCacheMu.RUnlock()
	return rdc.stringLocked()
}

func (rdc *rangeDescriptorCache_cockroach6181) stringLocked() string {
	return "something here"
}

func doLookupWithToken_cockroach6181(rc *rangeDescriptorCache_cockroach6181) {
	rc.LookupRangeDescriptor()
}

func testRangeCacheCoalescedRequests_cockroach6181() {
	db := initTestDescriptorDB_cockroach6181()
	pauseLookupResumeAndAssert := func() {
		var wg sync.WaitGroup
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() { // G2,G3,...
				doLookupWithToken_cockroach6181(db.cache)
				wg.Done()
			}()
		}
		wg.Wait()
	}
	pauseLookupResumeAndAssert()
}

func Cockroach6181() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go testRangeCacheCoalescedRequests_cockroach6181() // G1
	}
}
183 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go vendored Normal file
@@ -0,0 +1,183 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: cockroach
 * Issue or PR : https://github.com/cockroachdb/cockroach/pull/7504
 * Buggy version: bc963b438cdc3e0ad058a5282358e5aee0595e17
 * fix commit-id: cab761b9f5ee5dee1448bc5d6b1d9f5a0ff0bad5
 * Flaky: 1/100
 */

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Cockroach7504", Cockroach7504)
}

func MakeCacheKey_cockroach7504(lease *LeaseState_cockroach7504) int {
	return lease.id
}

type LeaseState_cockroach7504 struct {
	mu sync.Mutex // L1
	id int
}

type LeaseSet_cockroach7504 struct {
	data []*LeaseState_cockroach7504
}

func (l *LeaseSet_cockroach7504) find(id int) *LeaseState_cockroach7504 {
	return l.data[id]
}

func (l *LeaseSet_cockroach7504) remove(s *LeaseState_cockroach7504) {
	for i := 0; i < len(l.data); i++ {
		if s == l.data[i] {
			l.data = append(l.data[:i], l.data[i+1:]...)
			break
		}
	}
}

type tableState_cockroach7504 struct {
	tableNameCache *tableNameCache_cockroach7504
	mu             sync.Mutex // L3
	active         *LeaseSet_cockroach7504
}

func (t *tableState_cockroach7504) release(lease *LeaseState_cockroach7504) {
	t.mu.Lock()         // L3
	defer t.mu.Unlock() // L3

	s := t.active.find(MakeCacheKey_cockroach7504(lease))
	s.mu.Lock() // L1
	runtime.Gosched()
	defer s.mu.Unlock() // L1

	t.removeLease(s)
}

func (t *tableState_cockroach7504) removeLease(lease *LeaseState_cockroach7504) {
	t.active.remove(lease)
	t.tableNameCache.remove(lease) // L1 acquire/release
}

type tableNameCache_cockroach7504 struct {
	mu     sync.Mutex // L2
	tables map[int]*LeaseState_cockroach7504
}

func (c *tableNameCache_cockroach7504) get(id int) {
	c.mu.Lock()         // L2
	defer c.mu.Unlock() // L2
	lease, ok := c.tables[id]
	if !ok {
		return
	}
	if lease == nil {
		panic("nil lease in name cache")
	}
	lease.mu.Lock()         // L1
	defer lease.mu.Unlock() // L1
}

func (c *tableNameCache_cockroach7504) remove(lease *LeaseState_cockroach7504) {
	c.mu.Lock() // L2
	runtime.Gosched()
	defer c.mu.Unlock() // L2
	key := MakeCacheKey_cockroach7504(lease)
	existing, ok := c.tables[key]
	if !ok {
		return
	}
	if existing == lease {
		delete(c.tables, key)
	}
}

type LeaseManager_cockroach7504 struct {
	_          [64]byte
	tableNames *tableNameCache_cockroach7504
	tables     map[int]*tableState_cockroach7504
}

func (m *LeaseManager_cockroach7504) AcquireByName(id int) {
	m.tableNames.get(id)
}

func (m *LeaseManager_cockroach7504) findTableState(lease *LeaseState_cockroach7504) *tableState_cockroach7504 {
	existing, ok := m.tables[lease.id]
	if !ok {
		return nil
	}
	return existing
}

func (m *LeaseManager_cockroach7504) Release(lease *LeaseState_cockroach7504) {
	t := m.findTableState(lease)
	t.release(lease)
}

func NewLeaseManager_cockroach7504(tname *tableNameCache_cockroach7504, ts *tableState_cockroach7504) *LeaseManager_cockroach7504 {
	mgr := &LeaseManager_cockroach7504{
		tableNames: tname,
		tables:     make(map[int]*tableState_cockroach7504),
	}
	mgr.tables[0] = ts
	return mgr
}

func NewLeaseSet_cockroach7504(n int) *LeaseSet_cockroach7504 {
	lset := &LeaseSet_cockroach7504{}
	for i := 0; i < n; i++ {
		lease := new(LeaseState_cockroach7504)
		lset.data = append(lset.data, lease)
	}
	return lset
}

func Cockroach7504() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go func() {
			leaseNum := 2
			lset := NewLeaseSet_cockroach7504(leaseNum)

			nc := &tableNameCache_cockroach7504{
				tables: make(map[int]*LeaseState_cockroach7504),
			}
			for i := 0; i < leaseNum; i++ {
				nc.tables[i] = lset.find(i)
			}

			ts := &tableState_cockroach7504{
				tableNameCache: nc,
				active:         lset,
			}

			mgr := NewLeaseManager_cockroach7504(nc, ts)

			// G1
			go func() {
				// lock L2-L1
				mgr.AcquireByName(0)
			}()

			// G2
			go func() {
				// lock L1-L2
				mgr.Release(lset.find(0))
			}()
		}()
	}
}
77 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go vendored Normal file
@@ -0,0 +1,77 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: cockroach
 * Issue or PR : https://github.com/cockroachdb/cockroach/pull/9935
 * Buggy version: 4df302cc3f03328395dc3fefbfba58b7718e4f2f
 * fix commit-id: ed6a100ba38dd51b0888b9a3d3ac6bdbb26c528c
 * Flaky: 100/100
 * Description: This leak is caused by acquiring l.mu.Lock() twice. The fix is
 * to release l.mu.Lock() before acquiring l.mu.Lock for the second time.
 */

package main

import (
	"errors"
	"math/rand"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Cockroach9935", Cockroach9935)
}

type loggingT_cockroach9935 struct {
	mu sync.Mutex
}

func (l *loggingT_cockroach9935) outputLogEntry() {
	l.mu.Lock()
	if err := l.createFile(); err != nil {
		l.exit(err)
	}
	l.mu.Unlock()
}

func (l *loggingT_cockroach9935) createFile() error {
	if rand.Intn(8)%4 > 0 {
		return errors.New("")
	}
	return nil
}

func (l *loggingT_cockroach9935) exit(err error) {
	l.mu.Lock() // Blocked forever
	defer l.mu.Unlock()
}

// Example of goroutine leak trace:
//
// G1
//----------------------------
// l.outputLogEntry()
// l.mu.Lock()
// l.createFile()
// l.exit()
// l.mu.Lock()
//-----------G1 leaks---------

func Cockroach9935() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			l := &loggingT_cockroach9935{}
			go l.outputLogEntry() // G1
		}()
	}
}
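
Because sync.Mutex is not reentrant, the exit path's second Lock on the same goroutine can never succeed. A sketch of the repaired shape, with illustrative names: keep the re-entered path lock-free and document the held-lock invariant instead.

package main

import "sync"

type logger struct{ mu sync.Mutex }

// exitLocked assumes the caller already holds l.mu and therefore
// must not touch the mutex itself.
func (l *logger) exitLocked() {
	// handle the error without a second l.mu.Lock()
}

func (l *logger) output(fail bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if fail {
		l.exitLocked()
	}
}

func main() {
	l := &logger{}
	l.output(true)
	l.output(false)
}
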
72 src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go vendored Normal file
@@ -0,0 +1,72 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Etcd10492", Etcd10492)
}

type Checkpointer_etcd10492 func(ctx context.Context)

type lessor_etcd10492 struct {
	mu                 sync.RWMutex
	cp                 Checkpointer_etcd10492
	checkpointInterval time.Duration
}

func (le *lessor_etcd10492) Checkpoint() {
	le.mu.Lock() // Lock acquired twice here
	defer le.mu.Unlock()
}

func (le *lessor_etcd10492) SetCheckpointer(cp Checkpointer_etcd10492) {
	le.mu.Lock()
	defer le.mu.Unlock()

	le.cp = cp
}

func (le *lessor_etcd10492) Renew() {
	le.mu.Lock()
	unlock := func() { le.mu.Unlock() }
	defer func() { unlock() }()

	if le.cp != nil {
		le.cp(context.Background())
	}
}

func Etcd10492() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() { // G1
		le := &lessor_etcd10492{
			checkpointInterval: 0,
		}
		fakerCheckerpointer_etcd10492 := func(ctx context.Context) {
			le.Checkpoint()
		}
		le.SetCheckpointer(fakerCheckerpointer_etcd10492)
		le.mu.Lock()
		le.mu.Unlock()
		le.Renew()
	}()
}
126 src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go vendored Normal file
@@ -0,0 +1,126 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"io"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Etcd5509", Etcd5509)
}

var ErrConnClosed_etcd5509 error

type Client_etcd5509 struct {
	mu     sync.RWMutex
	ctx    context.Context
	cancel context.CancelFunc
}

func (c *Client_etcd5509) Close() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cancel == nil {
		return
	}
	c.cancel()
	c.cancel = nil
	c.mu.Unlock()
	c.mu.Lock()
}

type remoteClient_etcd5509 struct {
	client *Client_etcd5509
	mu     sync.Mutex
}

func (r *remoteClient_etcd5509) acquire(ctx context.Context) error {
	for {
		r.client.mu.RLock()
		closed := r.client.cancel == nil
		r.mu.Lock()
		r.mu.Unlock()
		if closed {
			return ErrConnClosed_etcd5509 // Missing RUnlock before return
		}
		r.client.mu.RUnlock()
	}
}

type kv_etcd5509 struct {
	rc *remoteClient_etcd5509
}

func (kv *kv_etcd5509) Get(ctx context.Context) error {
	return kv.Do(ctx)
}

func (kv *kv_etcd5509) Do(ctx context.Context) error {
	for {
		err := kv.do(ctx)
		if err == nil {
			return nil
		}
		return err
	}
}

func (kv *kv_etcd5509) do(ctx context.Context) error {
	err := kv.getRemote(ctx)
	return err
}

func (kv *kv_etcd5509) getRemote(ctx context.Context) error {
	return kv.rc.acquire(ctx)
}

type KV interface {
	Get(ctx context.Context) error
	Do(ctx context.Context) error
}

func NewKV_etcd5509(c *Client_etcd5509) KV {
	return &kv_etcd5509{rc: &remoteClient_etcd5509{
		client: c,
	}}
}

func Etcd5509() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() {
		ctx, _ := context.WithCancel(context.TODO())
		cli := &Client_etcd5509{
			ctx: ctx,
		}
		kv := NewKV_etcd5509(cli)
		donec := make(chan struct{})
		go func() {
			defer close(donec)
			err := kv.Get(context.TODO())
			if err != nil && err != ErrConnClosed_etcd5509 {
				io.Discard.Write([]byte("Expect ErrConnClosed"))
			}
		}()

		runtime.Gosched()
		cli.Close()

		<-donec
	}()
}
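
acquire above returns on the closed path while still holding the client's read lock, which eventually starves every writer. The conventional repair, sketched here with illustrative names, is to pair RLock with a deferred RUnlock so early returns cannot leak it:

package main

import (
	"errors"
	"sync"
)

var errClosed = errors.New("conn closed")

type client struct {
	mu     sync.RWMutex
	closed bool
}

func (c *client) acquire() error {
	c.mu.RLock()
	defer c.mu.RUnlock() // runs on the early return below, too
	if c.closed {
		return errClosed // previously this path leaked the read lock
	}
	return nil
}

func main() {
	c := &client{closed: true}
	_ = c.acquire()
	c.mu.Lock() // a writer can still make progress afterwards
	c.mu.Unlock()
}
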
100 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go vendored Normal file
@@ -0,0 +1,100 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Etcd6708", Etcd6708)
}

type EndpointSelectionMode_etcd6708 int

const (
	EndpointSelectionRandom_etcd6708 EndpointSelectionMode_etcd6708 = iota
	EndpointSelectionPrioritizeLeader_etcd6708
)

type MembersAPI_etcd6708 interface {
	Leader(ctx context.Context)
}

type Client_etcd6708 interface {
	Sync(ctx context.Context)
	SetEndpoints()
	httpClient_etcd6708
}

type httpClient_etcd6708 interface {
	Do(context.Context)
}

type httpClusterClient_etcd6708 struct {
	sync.RWMutex
	selectionMode EndpointSelectionMode_etcd6708
}

func (c *httpClusterClient_etcd6708) getLeaderEndpoint() {
	mAPI := NewMembersAPI_etcd6708(c)
	mAPI.Leader(context.Background())
}

func (c *httpClusterClient_etcd6708) SetEndpoints() {
	switch c.selectionMode {
	case EndpointSelectionRandom_etcd6708:
	case EndpointSelectionPrioritizeLeader_etcd6708:
		c.getLeaderEndpoint()
	}
}

func (c *httpClusterClient_etcd6708) Do(ctx context.Context) {
	c.RLock()
	c.RUnlock()
}

func (c *httpClusterClient_etcd6708) Sync(ctx context.Context) {
	c.Lock()
	defer c.Unlock()

	c.SetEndpoints()
}

type httpMembersAPI_etcd6708 struct {
	client httpClient_etcd6708
}

func (m *httpMembersAPI_etcd6708) Leader(ctx context.Context) {
	m.client.Do(ctx)
}

func NewMembersAPI_etcd6708(c Client_etcd6708) MembersAPI_etcd6708 {
	return &httpMembersAPI_etcd6708{
		client: c,
	}
}

func Etcd6708() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() {
		hc := &httpClusterClient_etcd6708{
			selectionMode: EndpointSelectionPrioritizeLeader_etcd6708,
		}
		hc.Sync(context.Background())
	}()
}
81 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go vendored Normal file
@@ -0,0 +1,81 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: etcd
 * Issue or PR : https://github.com/etcd-io/etcd/pull/6857
 * Buggy version: 7c8f13aed7fe251e7066ed6fc1a090699c2cae0e
 * fix commit-id: 7afc490c95789c408fbc256d8e790273d331c984
 * Flaky: 19/100
 */

package main

import (
	"os"
	"runtime/pprof"
	"time"
)

func init() {
	register("Etcd6857", Etcd6857)
}

type Status_etcd6857 struct{}

type node_etcd6857 struct {
	status chan chan Status_etcd6857
	stop   chan struct{}
	done   chan struct{}
}

func (n *node_etcd6857) Status() Status_etcd6857 {
	c := make(chan Status_etcd6857)
	n.status <- c
	return <-c
}

func (n *node_etcd6857) run() {
	for {
		select {
		case c := <-n.status:
			c <- Status_etcd6857{}
		case <-n.stop:
			close(n.done)
			return
		}
	}
}

func (n *node_etcd6857) Stop() {
	select {
	case n.stop <- struct{}{}:
	case <-n.done:
		return
	}
	<-n.done
}

func NewNode_etcd6857() *node_etcd6857 {
	return &node_etcd6857{
		status: make(chan chan Status_etcd6857),
		stop:   make(chan struct{}),
		done:   make(chan struct{}),
	}
}

func Etcd6857() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i <= 100; i++ {
		go func() {
			n := NewNode_etcd6857()
			go n.run()    // G1
			go n.Status() // G2
			go n.Stop()   // G3
		}()
	}
}
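
Status and Stop above rendezvous with run over unbuffered channels, so if run wins the stop race first, a concurrent Status send has no receiver left. A sketch of the usual escape hatch, with illustrative names: let callers select on the node's done channel as well.

package main

import "fmt"

type node struct {
	status chan chan string
	done   chan struct{}
}

func (n *node) run(stop <-chan struct{}) {
	defer close(n.done)
	for {
		select {
		case c := <-n.status:
			c <- "ok"
		case <-stop:
			return
		}
	}
}

func (n *node) Status() (string, bool) {
	c := make(chan string)
	select {
	case n.status <- c: // server still serving
		return <-c, true
	case <-n.done: // without this case the caller leaks after shutdown
		return "", false
	}
}

func main() {
	n := &node{status: make(chan chan string), done: make(chan struct{})}
	stop := make(chan struct{})
	go n.run(stop)
	close(stop)
	s, ok := n.Status()
	fmt.Println(s, ok)
}
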
98 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go vendored Normal file
@@ -0,0 +1,98 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: etcd
 * Issue or PR : https://github.com/etcd-io/etcd/commit/7618fdd1d642e47cac70c03f637b0fd798a53a6e
 * Buggy version: 377f19b0031f9c0aafe2aec28b6f9019311f52f9
 * fix commit-id: 7618fdd1d642e47cac70c03f637b0fd798a53a6e
 * Flaky: 9/100
 */

package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Etcd6873", Etcd6873)
}

type watchBroadcast_etcd6873 struct{}

type watchBroadcasts_etcd6873 struct {
	mu      sync.Mutex
	updatec chan *watchBroadcast_etcd6873
	donec   chan struct{}
}

func newWatchBroadcasts_etcd6873() *watchBroadcasts_etcd6873 {
	wbs := &watchBroadcasts_etcd6873{
		updatec: make(chan *watchBroadcast_etcd6873, 1),
		donec:   make(chan struct{}),
	}
	go func() { // G2
		defer close(wbs.donec)
		for wb := range wbs.updatec {
			wbs.coalesce(wb)
		}
	}()
	return wbs
}

func (wbs *watchBroadcasts_etcd6873) coalesce(wb *watchBroadcast_etcd6873) {
	wbs.mu.Lock()
	wbs.mu.Unlock()
}

func (wbs *watchBroadcasts_etcd6873) stop() {
	wbs.mu.Lock()
	defer wbs.mu.Unlock()
	close(wbs.updatec)
	<-wbs.donec
}

func (wbs *watchBroadcasts_etcd6873) update(wb *watchBroadcast_etcd6873) {
	select {
	case wbs.updatec <- wb:
	default:
	}
}

// Example of goroutine leak trace:
//
// G1                      G2                 G3
//---------------------------------------------------------
// newWatchBroadcasts()
// wbs.update()
// wbs.updatec <-
// return
//                         <-wbs.updatec
//                         wbs.coalesce()
//                                            wbs.stop()
//                                            wbs.mu.Lock()
//                                            close(wbs.updatec)
//                                            <-wbs.donec
//                         wbs.mu.Lock()
//---------------------G2,G3 leak-------------------------

func Etcd6873() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			wbs := newWatchBroadcasts_etcd6873() // G1
			wbs.update(&watchBroadcast_etcd6873{})
			go wbs.stop() // G3
		}()
	}
}
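
stop above waits on wbs.donec while holding wbs.mu, but the worker it is waiting for needs wbs.mu to finish coalescing. A sketch of the fix shape, with illustrative names: release the lock before waiting.

package main

import "sync"

type broadcaster struct {
	mu      sync.Mutex
	updatec chan int
	donec   chan struct{}
}

func newBroadcaster() *broadcaster {
	b := &broadcaster{updatec: make(chan int, 1), donec: make(chan struct{})}
	go func() { // worker
		defer close(b.donec)
		for range b.updatec {
			b.mu.Lock() // worker needs the lock for each update
			b.mu.Unlock()
		}
	}()
	return b
}

func (b *broadcaster) stop() {
	b.mu.Lock()
	close(b.updatec)
	b.mu.Unlock() // release before waiting, or the worker can never finish
	<-b.donec
}

func main() {
	b := newBroadcaster()
	b.updatec <- 1
	b.stop()
}
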
163 src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go vendored Normal file
@@ -0,0 +1,163 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: etcd
 * Issue or PR : https://github.com/etcd-io/etcd/pull/7492
 * Buggy version: 51939650057d602bb5ab090633138fffe36854dc
 * fix commit-id: 1b1fabef8ffec606909f01c3983300fff539f214
 * Flaky: 40/100
 */

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Etcd7492", Etcd7492)
}

type TokenProvider_etcd7492 interface {
	assign()
	enable()
	disable()
}

type simpleTokenTTLKeeper_etcd7492 struct {
	tokens           map[string]time.Time
	addSimpleTokenCh chan struct{}
	stopCh           chan chan struct{}
	deleteTokenFunc  func(string)
}

type authStore_etcd7492 struct {
	tokenProvider TokenProvider_etcd7492
}

func (as *authStore_etcd7492) Authenticate() {
	as.tokenProvider.assign()
}

func NewSimpleTokenTTLKeeper_etcd7492(deletefunc func(string)) *simpleTokenTTLKeeper_etcd7492 {
	stk := &simpleTokenTTLKeeper_etcd7492{
		tokens:           make(map[string]time.Time),
		addSimpleTokenCh: make(chan struct{}, 1),
		stopCh:           make(chan chan struct{}),
		deleteTokenFunc:  deletefunc,
	}
	go stk.run() // G1
	return stk
}

func (tm *simpleTokenTTLKeeper_etcd7492) run() {
	tokenTicker := time.NewTicker(time.Nanosecond)
	defer tokenTicker.Stop()
	for {
		select {
		case <-tm.addSimpleTokenCh:
			runtime.Gosched()
			/// Making tm.tokens non-empty is enough
			tm.tokens["1"] = time.Now()
		case <-tokenTicker.C:
			runtime.Gosched()
			for t := range tm.tokens {
				tm.deleteTokenFunc(t)
				delete(tm.tokens, t)
			}
		case waitCh := <-tm.stopCh:
			waitCh <- struct{}{}
			return
		}
	}
}

func (tm *simpleTokenTTLKeeper_etcd7492) addSimpleToken() {
	tm.addSimpleTokenCh <- struct{}{}
	runtime.Gosched()
}

func (tm *simpleTokenTTLKeeper_etcd7492) stop() {
	waitCh := make(chan struct{})
	tm.stopCh <- waitCh
	<-waitCh
	close(tm.stopCh)
}

type tokenSimple_etcd7492 struct {
	simpleTokenKeeper *simpleTokenTTLKeeper_etcd7492
	simpleTokensMu    sync.RWMutex
}

func (t *tokenSimple_etcd7492) assign() {
	t.assignSimpleTokenToUser()
}

func (t *tokenSimple_etcd7492) assignSimpleTokenToUser() {
	t.simpleTokensMu.Lock()
	runtime.Gosched()
	t.simpleTokenKeeper.addSimpleToken()
	t.simpleTokensMu.Unlock()
}

func newDeleterFunc(t *tokenSimple_etcd7492) func(string) {
	return func(tk string) {
		t.simpleTokensMu.Lock()
		defer t.simpleTokensMu.Unlock()
	}
}

func (t *tokenSimple_etcd7492) enable() {
	t.simpleTokenKeeper = NewSimpleTokenTTLKeeper_etcd7492(newDeleterFunc(t))
}

func (t *tokenSimple_etcd7492) disable() {
	if t.simpleTokenKeeper != nil {
		t.simpleTokenKeeper.stop()
		t.simpleTokenKeeper = nil
	}
	t.simpleTokensMu.Lock()
	t.simpleTokensMu.Unlock()
}

func newTokenProviderSimple_etcd7492() *tokenSimple_etcd7492 {
	return &tokenSimple_etcd7492{}
}

func setupAuthStore_etcd7492() (store *authStore_etcd7492, teardownfunc func()) {
	as := &authStore_etcd7492{
		tokenProvider: newTokenProviderSimple_etcd7492(),
	}
	as.tokenProvider.enable()
	tearDown := func() {
		as.tokenProvider.disable()
	}
	return as, tearDown
}

func Etcd7492() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go func() {
			as, tearDown := setupAuthStore_etcd7492()
			defer tearDown()
			var wg sync.WaitGroup
			wg.Add(3)
			for i := 0; i < 3; i++ {
				go func() { // G2
					as.Authenticate()
					defer wg.Done()
				}()
			}
			wg.Wait()
		}()
	}
}
87 src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go vendored Normal file
@@ -0,0 +1,87 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: etcd
 * Issue or PR : https://github.com/coreos/etcd/pull/7902
 * Buggy version: dfdaf082c51ba14861267f632f6af795a27eb4ef
 * fix commit-id: 87d99fe0387ee1df1cf1811d88d37331939ef4ae
 * Flaky: 100/100
 */

package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Etcd7902", Etcd7902)
}

type roundClient_etcd7902 struct {
	progress int
	acquire  func()
	validate func()
	release  func()
}

func runElectionFunc_etcd7902() {
	rcs := make([]roundClient_etcd7902, 3)
	nextc := make(chan bool)
	for i := range rcs {
		var rcNextc chan bool
		setRcNextc := func() {
			rcNextc = nextc
		}
		rcs[i].acquire = func() {}
		rcs[i].validate = func() {
			setRcNextc()
		}
		rcs[i].release = func() {
			if i == 0 { // Assume the first roundClient is the leader
				close(nextc)
				nextc = make(chan bool)
			}
			<-rcNextc // Follower is blocking here
		}
	}
	doRounds_etcd7902(rcs, 100)
}

func doRounds_etcd7902(rcs []roundClient_etcd7902, rounds int) {
	var mu sync.Mutex
	var wg sync.WaitGroup
	wg.Add(len(rcs))
	for i := range rcs {
		go func(rc *roundClient_etcd7902) { // G2,G3
			defer wg.Done()
			for rc.progress < rounds || rounds <= 0 {
				rc.acquire()
				mu.Lock()
				rc.validate()
				mu.Unlock()
				time.Sleep(10 * time.Millisecond)
				rc.progress++
				mu.Lock()
				rc.release()
				mu.Unlock()
			}
		}(&rcs[i])
	}
	wg.Wait()
}

func Etcd7902() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go runElectionFunc_etcd7902() // G1
	}
}
111 src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go vendored Normal file
@@ -0,0 +1,111 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: grpc-go
 * Issue or PR : https://github.com/grpc/grpc-go/pull/1275
 * Buggy version: (missing)
 * fix commit-id: 0669f3f89e0330e94bb13fa1ce8cc704aab50c9c
 * Flaky: 100/100
 */

package main

import (
	"io"
	"os"
	"runtime/pprof"
	"time"
)

func init() {
	register("Grpc1275", Grpc1275)
}

type recvBuffer_grpc1275 struct {
	c chan bool
}

func (b *recvBuffer_grpc1275) get() <-chan bool {
	return b.c
}

type recvBufferReader_grpc1275 struct {
	recv *recvBuffer_grpc1275
}

func (r *recvBufferReader_grpc1275) Read(p []byte) (int, error) {
	select {
	case <-r.recv.get():
	}
	return 0, nil
}

type Stream_grpc1275 struct {
	trReader io.Reader
}

func (s *Stream_grpc1275) Read(p []byte) (int, error) {
	return io.ReadFull(s.trReader, p)
}

type http2Client_grpc1275 struct{}

func (t *http2Client_grpc1275) CloseStream(s *Stream_grpc1275) {
	// It is the client.CloseStream() method, called by the
	// main goroutine, that should send the message, but it
	// does not. The patch is to send out this message.
}

func (t *http2Client_grpc1275) NewStream() *Stream_grpc1275 {
	return &Stream_grpc1275{
		trReader: &recvBufferReader_grpc1275{
			recv: &recvBuffer_grpc1275{
				c: make(chan bool),
			},
		},
	}
}

func testInflightStreamClosing_grpc1275() {
	client := &http2Client_grpc1275{}
	stream := client.NewStream()
	donec := make(chan bool)
	go func() { // G2
		defer close(donec)
		stream.Read([]byte{1})
	}()

	client.CloseStream(stream)

	timeout := time.NewTimer(300 * time.Nanosecond)
	select {
	case <-donec:
		if !timeout.Stop() {
			<-timeout.C
		}
	case <-timeout.C:
	}
}

///
/// G1                               G2
/// testInflightStreamClosing()
///                                  stream.Read()
///                                  io.ReadFull()
///                                  <- r.recv.get()
/// CloseStream()
/// <- donec
/// ------------G1 timeout, G2 leak---------------------
///

func Grpc1275() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		testInflightStreamClosing_grpc1275() // G1
	}()
}
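
The timeout path above walks away while G2 is still parked on an unbuffered receive that nothing will ever complete. A sketch, with illustrative names, of making the close path observable to the blocked reader:

package main

import (
	"fmt"
	"time"
)

func main() {
	data := make(chan []byte)     // never written in this scenario
	closed := make(chan struct{}) // closed by the CloseStream analogue

	done := make(chan struct{})
	go func() { // reader
		defer close(done)
		select {
		case <-data:
		case <-closed: // without this case the reader leaks on timeout
		}
	}()

	close(closed) // the close path now notifies the reader
	select {
	case <-done:
		fmt.Println("reader exited cleanly")
	case <-time.After(time.Second):
		fmt.Println("reader leaked")
	}
}
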
105
src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
vendored
Normal file
105
src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
vendored
Normal file
|
@ -0,0 +1,105 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: grpc-go
 * Issue or PR : https://github.com/grpc/grpc-go/pull/1424
 * Buggy version: 39c8c3866d926d95e11c03508bf83d00f2963f91
 * fix commit-id: 64bd0b04a7bb1982078bae6a2ab34c226125fbc1
 * Flaky: 100/100
 */
package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Grpc1424", Grpc1424)
}

type Balancer_grpc1424 interface {
	Notify() <-chan bool
}

type roundRobin_grpc1424 struct {
	mu     sync.Mutex
	addrCh chan bool
}

func (rr *roundRobin_grpc1424) Notify() <-chan bool {
	return rr.addrCh
}

type addrConn_grpc1424 struct {
	mu sync.Mutex
}

func (ac *addrConn_grpc1424) tearDown() {
	ac.mu.Lock()
	defer ac.mu.Unlock()
}

type dialOption_grpc1424 struct {
	balancer Balancer_grpc1424
}

type ClientConn_grpc1424 struct {
	dopts dialOption_grpc1424
	conns []*addrConn_grpc1424
}

func (cc *ClientConn_grpc1424) lbWatcher(doneChan chan bool) {
	for addr := range cc.dopts.balancer.Notify() {
		if addr {
			// nop, make compiler happy
		}
		var (
			del []*addrConn_grpc1424
		)
		for _, a := range cc.conns {
			del = append(del, a)
		}
		for _, c := range del {
			c.tearDown()
		}
	}
}

func NewClientConn_grpc1424() *ClientConn_grpc1424 {
	cc := &ClientConn_grpc1424{
		dopts: dialOption_grpc1424{
			&roundRobin_grpc1424{addrCh: make(chan bool)},
		},
	}
	return cc
}

func DialContext_grpc1424() {
	cc := NewClientConn_grpc1424()
	waitC := make(chan error, 1)
	go func() { // G2
		defer close(waitC)
		ch := cc.dopts.balancer.Notify()
		if ch != nil {
			doneChan := make(chan bool)
			go cc.lbWatcher(doneChan) // G3
			<-doneChan
		}
	}()
	/// close addrCh
	close(cc.dopts.balancer.(*roundRobin_grpc1424).addrCh)
}

func Grpc1424() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	go DialContext_grpc1424() // G1
}
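Here G3 (lbWatcher) returns once addrCh is closed, but it never signals doneChan, so G2 stays parked on <-doneChan. One plausible repair, sketched against the types above (the real fix in 64bd0b04 restructures the watcher; closing doneChan on exit is an assumption made for illustration):

func (cc *ClientConn_grpc1424) lbWatcherFixed(doneChan chan bool) {
	defer close(doneChan) // let the waiter in DialContext proceed
	for range cc.dopts.balancer.Notify() {
		for _, c := range cc.conns {
			c.tearDown()
		}
	}
}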

84
src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go
vendored
Normal file
@ -0,0 +1,84 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: grpc
 * Issue or PR : https://github.com/grpc/grpc-go/pull/1460
 * Buggy version: 7db1564ba1229bc42919bb1f6d9c4186f3aa8678
 * fix commit-id: e605a1ecf24b634f94f4eefdab10a9ada98b70dd
 * Flaky: 100/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Grpc1460", Grpc1460)
}

type Stream_grpc1460 struct{}

type http2Client_grpc1460 struct {
	mu              sync.Mutex
	awakenKeepalive chan struct{}
	activeStream    []*Stream_grpc1460
}

func (t *http2Client_grpc1460) keepalive() {
	t.mu.Lock()
	if len(t.activeStream) < 1 {
		<-t.awakenKeepalive
		runtime.Gosched()
		t.mu.Unlock()
	} else {
		t.mu.Unlock()
	}
}

func (t *http2Client_grpc1460) NewStream() {
	t.mu.Lock()
	runtime.Gosched()
	t.activeStream = append(t.activeStream, &Stream_grpc1460{})
	if len(t.activeStream) == 1 {
		select {
		case t.awakenKeepalive <- struct{}{}:
		default:
		}
	}
	t.mu.Unlock()
}

///
/// G1                       G2
/// client.keepalive()
///                          client.NewStream()
/// t.mu.Lock()
/// <-t.awakenKeepalive
///                          t.mu.Lock()
/// ---------------G1, G2 deadlock--------------
///

func Grpc1460() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go func() {
			client := &http2Client_grpc1460{
				awakenKeepalive: make(chan struct{}),
			}
			go client.keepalive() // G1
			go client.NewStream() // G2
		}()
	}
}
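The deadlock trace above is the classic blocking-receive-under-mutex shape: G1 holds t.mu while parked on <-t.awakenKeepalive, and G2 must take t.mu before it can reach the send. A sketch of the safe ordering (illustrative; the real patch also rechecks state after waking):

func (t *http2Client_grpc1460) keepaliveFixed() {
	t.mu.Lock()
	idle := len(t.activeStream) < 1
	t.mu.Unlock() // release before blocking on the channel
	if idle {
		<-t.awakenKeepalive
	}
}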

123
src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go
vendored
Normal file
@ -0,0 +1,123 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

// This test case is a reproduction of grpc/3017.
//
// It is a goroutine leak that also simultaneously engages many GC assists.
// Testing runtime behaviour when pivoting between regular and goroutine leak detection modes.

func init() {
	register("Grpc3017", Grpc3017)
}

type Address_grpc3017 int
type SubConn_grpc3017 int

type subConnCacheEntry_grpc3017 struct {
	sc            SubConn_grpc3017
	cancel        func()
	abortDeleting bool
}

type lbCacheClientConn_grpc3017 struct {
	mu            sync.Mutex // L1
	timeout       time.Duration
	subConnCache  map[Address_grpc3017]*subConnCacheEntry_grpc3017
	subConnToAddr map[SubConn_grpc3017]Address_grpc3017
}

func (ccc *lbCacheClientConn_grpc3017) NewSubConn(addrs []Address_grpc3017) SubConn_grpc3017 {
	if len(addrs) != 1 {
		return SubConn_grpc3017(1)
	}
	addrWithoutMD := addrs[0]
	ccc.mu.Lock() // L1
	defer ccc.mu.Unlock()
	if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
		entry.cancel()
		delete(ccc.subConnCache, addrWithoutMD)
		return entry.sc
	}
	scNew := SubConn_grpc3017(1)
	ccc.subConnToAddr[scNew] = addrWithoutMD
	return scNew
}

func (ccc *lbCacheClientConn_grpc3017) RemoveSubConn(sc SubConn_grpc3017) {
	ccc.mu.Lock() // L1
	defer ccc.mu.Unlock()
	addr, ok := ccc.subConnToAddr[sc]
	if !ok {
		return
	}

	if entry, ok := ccc.subConnCache[addr]; ok {
		if entry.sc != sc {
			delete(ccc.subConnToAddr, sc)
		}
		return
	}

	entry := &subConnCacheEntry_grpc3017{
		sc: sc,
	}
	ccc.subConnCache[addr] = entry

	timer := time.AfterFunc(ccc.timeout, func() { // G3
		runtime.Gosched()
		ccc.mu.Lock() // L1
		if entry.abortDeleting {
			return // Missing unlock
		}
		delete(ccc.subConnToAddr, sc)
		delete(ccc.subConnCache, addr)
		ccc.mu.Unlock()
	})

	entry.cancel = func() {
		if !timer.Stop() {
			entry.abortDeleting = true
		}
	}
}

func Grpc3017() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() { // G1
			done := make(chan struct{})

			ccc := &lbCacheClientConn_grpc3017{
				timeout:       time.Nanosecond,
				subConnCache:  make(map[Address_grpc3017]*subConnCacheEntry_grpc3017),
				subConnToAddr: make(map[SubConn_grpc3017]Address_grpc3017),
			}

			sc := ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
			go func() { // G2
				for i := 0; i < 10000; i++ {
					ccc.RemoveSubConn(sc)
					sc = ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
				}
				close(done)
			}()
			<-done
		}()
	}
}
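The leak hinges on the "Missing unlock" return inside the AfterFunc callback: once one timer goroutine returns with L1 held, every later NewSubConn and RemoveSubConn parks on ccc.mu forever. Acquiring through defer makes the early return safe; a sketch of the corrected callback (same body, unlock hoisted into a defer):

timer := time.AfterFunc(ccc.timeout, func() {
	ccc.mu.Lock()
	defer ccc.mu.Unlock() // also runs on the abortDeleting return
	if entry.abortDeleting {
		return
	}
	delete(ccc.subConnToAddr, sc)
	delete(ccc.subConnCache, addr)
})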

65
src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go
vendored
Normal file
@ -0,0 +1,65 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: grpc-go
 * Issue or PR : https://github.com/grpc/grpc-go/pull/660
 * Buggy version: db85417dd0de6cc6f583672c6175a7237e5b5dd2
 * fix commit-id: ceacfbcbc1514e4e677932fd55938ac455d182fb
 * Flaky: 100/100
 */
package main

import (
	"math/rand"
	"os"
	"runtime"
	"runtime/pprof"
)

func init() {
	register("Grpc660", Grpc660)
}

type benchmarkClient_grpc660 struct {
	stop chan bool
}

func (bc *benchmarkClient_grpc660) doCloseLoopUnary() {
	for {
		done := make(chan bool)
		go func() { // G2
			if rand.Intn(10) > 7 {
				done <- false
				return
			}
			done <- true
		}()
		select {
		case <-bc.stop:
			return
		case <-done:
		}
	}
}

func Grpc660() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		bc := &benchmarkClient_grpc660{
			stop: make(chan bool),
		}
		go bc.doCloseLoopUnary() // G1
		go func() { // helper goroutine
			bc.stop <- true
		}()
	}()
}
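When bc.stop wins the select, the worker G2 is still blocked on its send to the unbuffered done channel and leaks. Giving done capacity 1 lets the single send always complete; a sketch of that shape inside doCloseLoopUnary (an illustrative simplification, not the verbatim upstream fix):

done := make(chan bool, 1)
go func() {
	done <- rand.Intn(10) <= 7 // buffered: never blocks
}()
select {
case <-bc.stop:
	return
case <-done:
}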

74
src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go
vendored
Normal file
@ -0,0 +1,74 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Grpc795", Grpc795)
}

type Server_grpc795 struct {
	mu    sync.Mutex
	drain bool
}

func (s *Server_grpc795) GracefulStop() {
	s.mu.Lock()
	if s.drain {
		s.mu.Lock()
		return
	}
	s.drain = true
	s.mu.Unlock()
}

func (s *Server_grpc795) Serve() {
	s.mu.Lock()
	s.mu.Unlock()
}

func NewServer_grpc795() *Server_grpc795 {
	return &Server_grpc795{}
}

type test_grpc795 struct {
	srv *Server_grpc795
}

func (te *test_grpc795) startServer() {
	s := NewServer_grpc795()
	te.srv = s
	go s.Serve()
}

func newTest_grpc795() *test_grpc795 {
	return &test_grpc795{}
}

func testServerGracefulStopIdempotent_grpc795() {
	te := newTest_grpc795()

	te.startServer()

	for i := 0; i < 3; i++ {
		te.srv.GracefulStop()
	}
}

func Grpc795() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go testServerGracefulStopIdempotent_grpc795()
	}
}
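GracefulStop's drain branch calls s.mu.Lock() where an Unlock is intended, so the second invocation self-deadlocks and every queued Serve goroutine leaks behind it. The corrected branch, sketched against the type above (hypothetical method name):

func (s *Server_grpc795) gracefulStopFixed() {
	s.mu.Lock()
	if s.drain {
		s.mu.Unlock() // the buggy version re-locks here
		return
	}
	s.drain = true
	s.mu.Unlock()
}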

105
src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go
vendored
Normal file
@ -0,0 +1,105 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: grpc-go
 * Issue or PR : https://github.com/grpc/grpc-go/pull/862
 * Buggy version: d8f4ebe77f6b7b6403d7f98626de8a534f9b93a7
 * fix commit-id: dd5645bebff44f6b88780bb949022a09eadd7dae
 * Flaky: 100/100
 */
package main

import (
	"context"
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func init() {
	register("Grpc862", Grpc862)
}

type ClientConn_grpc862 struct {
	ctx    context.Context
	cancel context.CancelFunc
	conns  []*addrConn_grpc862
}

func (cc *ClientConn_grpc862) Close() {
	cc.cancel()
	conns := cc.conns
	cc.conns = nil
	for _, ac := range conns {
		ac.tearDown()
	}
}

func (cc *ClientConn_grpc862) resetAddrConn() {
	ac := &addrConn_grpc862{
		cc: cc,
	}
	cc.conns = append(cc.conns, ac)
	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
	ac.resetTransport()
}

type addrConn_grpc862 struct {
	cc     *ClientConn_grpc862
	ctx    context.Context
	cancel context.CancelFunc
}

func (ac *addrConn_grpc862) resetTransport() {
	for retries := 1; ; retries++ {
		_ = 2 * time.Nanosecond * time.Duration(retries)
		timeout := 10 * time.Nanosecond
		_, cancel := context.WithTimeout(ac.ctx, timeout)
		_ = time.Now()
		cancel()
		<-ac.ctx.Done()
		return
	}
}

func (ac *addrConn_grpc862) tearDown() {
	ac.cancel()
}

func DialContext_grpc862(ctx context.Context) (conn *ClientConn_grpc862) {
	cc := &ClientConn_grpc862{}
	cc.ctx, cc.cancel = context.WithCancel(context.Background())
	defer func() {
		select {
		case <-ctx.Done():
			if conn != nil {
				conn.Close()
			}
			conn = nil
		default:
		}
	}()
	go func() { // G2
		cc.resetAddrConn()
	}()
	return conn
}

func Grpc862() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		go DialContext_grpc862(ctx) // G1
		go cancel()                 // helper goroutine
	}()
}

81
src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go
vendored
Normal file
@ -0,0 +1,81 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Hugo3251", Hugo3251)
}

type remoteLock_hugo3251 struct {
	sync.RWMutex                        // L1
	m            map[string]*sync.Mutex // L2
}

func (l *remoteLock_hugo3251) URLLock(url string) {
	l.Lock() // L1
	if _, ok := l.m[url]; !ok {
		l.m[url] = &sync.Mutex{}
	}
	l.m[url].Lock() // L2
	runtime.Gosched()
	l.Unlock() // L1
	// runtime.Gosched()
}

func (l *remoteLock_hugo3251) URLUnlock(url string) {
	l.RLock()         // L1
	defer l.RUnlock() // L1
	if um, ok := l.m[url]; ok {
		um.Unlock() // L2
	}
}

func resGetRemote_hugo3251(remoteURLLock *remoteLock_hugo3251, url string) error {
	remoteURLLock.URLLock(url)
	defer func() { remoteURLLock.URLUnlock(url) }()

	return nil
}

func Hugo3251() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(time.Second)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 11; i++ {
		go func() { // G1
			url := "http://Foo.Bar/foo_Bar-Foo"
			remoteURLLock := &remoteLock_hugo3251{m: make(map[string]*sync.Mutex)}
			for range []bool{false, true} {
				var wg sync.WaitGroup
				for i := 0; i < 100; i++ {
					wg.Add(1)
					go func(gor int) { // G2
						defer wg.Done()
						for j := 0; j < 200; j++ {
							err := resGetRemote_hugo3251(remoteURLLock, url)
							if err != nil {
								fmt.Errorf("Error getting resource content: %s", err)
							}
							time.Sleep(300 * time.Nanosecond)
						}
					}(i)
				}
				wg.Wait()
			}
		}()
	}
}
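URLLock locks the per-URL mutex L2 and leaves it for URLUnlock, possibly running on a different goroutine, to release; combined with the outer RWMutex L1 this wedges the workers. A design that avoids cross-goroutine unlocks entirely is to deduplicate the fetch per URL; a sketch of that alternative (not the actual Hugo fix, which keeps the lock map):

// Deduplicate work per URL with one sync.Once per key.
var fetches sync.Map // url -> *sync.Once

func fetchOnce(url string, fetch func()) {
	once, _ := fetches.LoadOrStore(url, new(sync.Once))
	once.(*sync.Once).Do(fetch)
}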

317
src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go
vendored
Normal file
@ -0,0 +1,317 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"log"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Hugo5379", Hugo5379)
}

type shortcodeHandler_hugo5379 struct {
	p                      *PageWithoutContent_hugo5379
	contentShortcodes      map[int]func() error
	contentShortcodesDelta map[int]func() error
	init                   sync.Once // O1
}

func (s *shortcodeHandler_hugo5379) executeShortcodesForDelta(p *PageWithoutContent_hugo5379) error {
	for k, _ := range s.contentShortcodesDelta {
		render := s.contentShortcodesDelta[k]
		if err := render(); err != nil {
			continue
		}
	}
	return nil
}

func (s *shortcodeHandler_hugo5379) updateDelta() {
	s.init.Do(func() {
		s.contentShortcodes = createShortcodeRenderers_hugo5379(s.p.withoutContent())
	})

	delta := make(map[int]func() error)

	for k, v := range s.contentShortcodes {
		if _, ok := delta[k]; !ok {
			delta[k] = v
		}
	}

	s.contentShortcodesDelta = delta
}

type Page_hugo5379 struct {
	*pageInit_hugo5379
	*pageContentInit_hugo5379
	pageWithoutContent *PageWithoutContent_hugo5379
	contentInit        sync.Once  // O2
	contentInitMu      sync.Mutex // L1
	shortcodeState     *shortcodeHandler_hugo5379
}

func (p *Page_hugo5379) WordCount() {
	p.initContentPlainAndMeta()
}

func (p *Page_hugo5379) initContentPlainAndMeta() {
	p.initContent()
	p.initPlain(true)
}

func (p *Page_hugo5379) initPlain(lock bool) {
	p.plainInit.Do(func() {
		if lock {
			/// Double locking here.
			p.contentInitMu.Lock()
			defer p.contentInitMu.Unlock()
		}
	})
}

func (p *Page_hugo5379) withoutContent() *PageWithoutContent_hugo5379 {
	p.pageInit_hugo5379.withoutContentInit.Do(func() {
		p.pageWithoutContent = &PageWithoutContent_hugo5379{Page_hugo5379: p}
	})
	return p.pageWithoutContent
}

func (p *Page_hugo5379) prepareForRender() error {
	var err error
	if err = handleShortcodes_hugo5379(p.withoutContent()); err != nil {
		return err
	}
	return nil
}

func (p *Page_hugo5379) setContentInit() {
	p.shortcodeState.updateDelta()
}

func (p *Page_hugo5379) initContent() {
	p.contentInit.Do(func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()
		c := make(chan error, 1)

		go func() { // G2
			var err error
			p.contentInitMu.Lock() // first lock here
			defer p.contentInitMu.Unlock()

			err = p.prepareForRender()
			if err != nil {
				c <- err
				return
			}
			c <- err
		}()

		select {
		case <-ctx.Done():
		case <-c:
		}
	})
}

type PageWithoutContent_hugo5379 struct {
	*Page_hugo5379
}

type pageInit_hugo5379 struct {
	withoutContentInit sync.Once
}

type pageContentInit_hugo5379 struct {
	contentInit sync.Once // O3
	plainInit   sync.Once // O4
}

type HugoSites_hugo5379 struct {
	Sites []*Site_hugo5379
}

func (h *HugoSites_hugo5379) render() {
	for _, s := range h.Sites {
		for _, s2 := range h.Sites {
			s2.preparePagesForRender()
		}
		s.renderPages()
	}
}

func (h *HugoSites_hugo5379) Build() {
	h.render()
}

type Pages_hugo5379 []*Page_hugo5379

type PageCollections_hugo5379 struct {
	Pages Pages_hugo5379
}

type Site_hugo5379 struct {
	*PageCollections_hugo5379
}

func (s *Site_hugo5379) preparePagesForRender() {
	for _, p := range s.Pages {
		p.setContentInit()
	}
}

func (s *Site_hugo5379) renderForLayouts() {
	/// Omit reflections
	for _, p := range s.Pages {
		p.WordCount()
	}
}

func (s *Site_hugo5379) renderAndWritePage() {
	s.renderForLayouts()
}

func (s *Site_hugo5379) renderPages() {
	numWorkers := 2
	wg := &sync.WaitGroup{}

	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go pageRenderer_hugo5379(s, wg) // G3
	}

	wg.Wait()
}

type sitesBuilder_hugo5379 struct {
	H *HugoSites_hugo5379
}

func (s *sitesBuilder_hugo5379) Build() *sitesBuilder_hugo5379 {
	return s.build()
}

func (s *sitesBuilder_hugo5379) build() *sitesBuilder_hugo5379 {
	s.H.Build()
	return s
}

func (s *sitesBuilder_hugo5379) CreateSitesE() error {
	sites, err := NewHugoSites_hugo5379()
	if err != nil {
		return err
	}
	s.H = sites
	return nil
}

func (s *sitesBuilder_hugo5379) CreateSites() *sitesBuilder_hugo5379 {
	if err := s.CreateSitesE(); err != nil {
		log.Fatalf("Failed to create sites: %s", err)
	}
	return s
}

func newHugoSites_hugo5379(sites ...*Site_hugo5379) (*HugoSites_hugo5379, error) {
	h := &HugoSites_hugo5379{Sites: sites}
	return h, nil
}

func newSite_hugo5379() *Site_hugo5379 {
	c := &PageCollections_hugo5379{}
	s := &Site_hugo5379{
		PageCollections_hugo5379: c,
	}
	return s
}

func createSitesFromConfig_hugo5379() []*Site_hugo5379 {
	var (
		sites []*Site_hugo5379
	)

	var s *Site_hugo5379 = newSite_hugo5379()
	sites = append(sites, s)
	return sites
}

func NewHugoSites_hugo5379() (*HugoSites_hugo5379, error) {
	sites := createSitesFromConfig_hugo5379()
	return newHugoSites_hugo5379(sites...)
}

func prepareShortcodeForPage_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
	m := make(map[int]func() error)
	m[0] = func() error {
		return renderShortcode_hugo5379(p)
	}
	return m
}

func renderShortcode_hugo5379(p *PageWithoutContent_hugo5379) error {
	return renderShortcodeWithPage_hugo5379(p)
}

func renderShortcodeWithPage_hugo5379(p *PageWithoutContent_hugo5379) error {
	/// Omit reflections
	p.WordCount()
	return nil
}

func createShortcodeRenderers_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
	return prepareShortcodeForPage_hugo5379(p)
}

func newShortcodeHandler_hugo5379(p *Page_hugo5379) *shortcodeHandler_hugo5379 {
	return &shortcodeHandler_hugo5379{
		p:                      p.withoutContent(),
		contentShortcodes:      make(map[int]func() error),
		contentShortcodesDelta: make(map[int]func() error),
	}
}

func handleShortcodes_hugo5379(p *PageWithoutContent_hugo5379) error {
	return p.shortcodeState.executeShortcodesForDelta(p)
}

func pageRenderer_hugo5379(s *Site_hugo5379, wg *sync.WaitGroup) {
	defer wg.Done()
	s.renderAndWritePage()
}

func Hugo5379() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() { // G1
			b := &sitesBuilder_hugo5379{}
			s := b.CreateSites()
			for _, site := range s.H.Sites {
				p := &Page_hugo5379{
					pageInit_hugo5379:        &pageInit_hugo5379{},
					pageContentInit_hugo5379: &pageContentInit_hugo5379{},
					pageWithoutContent:       &PageWithoutContent_hugo5379{},
					contentInit:              sync.Once{},
					contentInitMu:            sync.Mutex{},
					shortcodeState:           nil,
				}
				p.shortcodeState = newShortcodeHandler_hugo5379(p)
				site.Pages = append(site.Pages, p)
			}
			s.Build()
		}()
	}
}

129
src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go
vendored
Normal file
@ -0,0 +1,129 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Istio16224", Istio16224)
}

type ConfigStoreCache_istio16224 interface {
	RegisterEventHandler(handler func())
	Run()
}

type Event_istio16224 int

type Handler_istio16224 func(Event_istio16224)

type configstoreMonitor_istio16224 struct {
	handlers []Handler_istio16224
	eventCh  chan Event_istio16224
}

func (m *configstoreMonitor_istio16224) Run(stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			// This bug is not described upstream, but is a true positive
			// (in our eyes). In a real run, main exits while the
			// goroutine is blocked here.
			if _, ok := <-m.eventCh; ok {
				close(m.eventCh)
			}
			return
		case ce, ok := <-m.eventCh:
			if ok {
				m.processConfigEvent(ce)
			}
		}
	}
}

func (m *configstoreMonitor_istio16224) processConfigEvent(ce Event_istio16224) {
	m.applyHandlers(ce)
}

func (m *configstoreMonitor_istio16224) AppendEventHandler(h Handler_istio16224) {
	m.handlers = append(m.handlers, h)
}

func (m *configstoreMonitor_istio16224) applyHandlers(e Event_istio16224) {
	for _, f := range m.handlers {
		f(e)
	}
}

func (m *configstoreMonitor_istio16224) ScheduleProcessEvent(configEvent Event_istio16224) {
	m.eventCh <- configEvent
}

type Monitor_istio16224 interface {
	Run(<-chan struct{})
	AppendEventHandler(Handler_istio16224)
	ScheduleProcessEvent(Event_istio16224)
}

type controller_istio16224 struct {
	monitor Monitor_istio16224
}

func (c *controller_istio16224) RegisterEventHandler(f func(Event_istio16224)) {
	c.monitor.AppendEventHandler(f)
}

func (c *controller_istio16224) Run(stop <-chan struct{}) {
	c.monitor.Run(stop)
}

func (c *controller_istio16224) Create() {
	c.monitor.ScheduleProcessEvent(Event_istio16224(0))
}

func NewMonitor_istio16224() Monitor_istio16224 {
	return NewBufferedMonitor_istio16224()
}

func NewBufferedMonitor_istio16224() Monitor_istio16224 {
	return &configstoreMonitor_istio16224{
		eventCh: make(chan Event_istio16224),
	}
}

func Istio16224() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			controller := &controller_istio16224{monitor: NewMonitor_istio16224()}
			done := make(chan bool)
			lock := sync.Mutex{}
			controller.RegisterEventHandler(func(event Event_istio16224) {
				lock.Lock()
				defer lock.Unlock()
				done <- true
			})

			stop := make(chan struct{})
			go controller.Run(stop)

			controller.Create()

			lock.Lock() // blocks
			lock.Unlock()
			<-done

			close(stop)
		}()
	}
}
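The registered handler sends on the unbuffered done channel while holding lock, and the test's goroutine blocks on lock.Lock() before it ever reaches <-done, so handler and caller park each other. Either buffer done or keep the send outside the critical section; a sketch of the latter against the test body above (illustrative, not the upstream Istio patch):

controller.RegisterEventHandler(func(event Event_istio16224) {
	lock.Lock()
	// ... update handler state under the lock ...
	lock.Unlock()
	done <- true // send only after releasing the lock
})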

144
src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go
vendored
Normal file
@ -0,0 +1,144 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Istio17860", Istio17860)
}

type Proxy_istio17860 interface {
	IsLive() bool
}

type TestProxy_istio17860 struct {
	live func() bool
}

func (tp TestProxy_istio17860) IsLive() bool {
	if tp.live == nil {
		return true
	}
	return tp.live()
}

type Agent_istio17860 interface {
	Run(ctx context.Context)
	Restart()
}

type exitStatus_istio17860 int

type agent_istio17860 struct {
	proxy        Proxy_istio17860
	mu           *sync.Mutex
	statusCh     chan exitStatus_istio17860
	currentEpoch int
	activeEpochs map[int]struct{}
}

func (a *agent_istio17860) Run(ctx context.Context) {
	for {
		select {
		case status := <-a.statusCh:
			a.mu.Lock()
			delete(a.activeEpochs, int(status))
			active := len(a.activeEpochs)
			a.mu.Unlock()
			if active == 0 {
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

func (a *agent_istio17860) Restart() {
	a.mu.Lock()
	defer a.mu.Unlock()

	a.waitUntilLive()
	a.currentEpoch++
	a.activeEpochs[a.currentEpoch] = struct{}{}

	go a.runWait(a.currentEpoch)
}

func (a *agent_istio17860) runWait(epoch int) {
	a.statusCh <- exitStatus_istio17860(epoch)
}

func (a *agent_istio17860) waitUntilLive() {
	if len(a.activeEpochs) == 0 {
		return
	}

	interval := time.NewTicker(30 * time.Nanosecond)
	timer := time.NewTimer(100 * time.Nanosecond)
	defer func() {
		interval.Stop()
		timer.Stop()
	}()

	if a.proxy.IsLive() {
		return
	}

	for {
		select {
		case <-timer.C:
			return
		case <-interval.C:
			if a.proxy.IsLive() {
				return
			}
		}
	}
}

func NewAgent_istio17860(proxy Proxy_istio17860) Agent_istio17860 {
	return &agent_istio17860{
		proxy:        proxy,
		mu:           &sync.Mutex{},
		statusCh:     make(chan exitStatus_istio17860),
		activeEpochs: make(map[int]struct{}),
	}
}

func Istio17860() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			neverLive := func() bool {
				return false
			}

			a := NewAgent_istio17860(TestProxy_istio17860{live: neverLive})
			go func() { a.Run(ctx) }()

			a.Restart()
			go a.Restart()

			time.Sleep(200 * time.Nanosecond)
		}()
	}
}

154
src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go
vendored
Normal file
@ -0,0 +1,154 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"context"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Istio18454", Istio18454)
}

const eventChCap_istio18454 = 1024

type Worker_istio18454 struct {
	ctx       context.Context
	ctxCancel context.CancelFunc
}

func (w *Worker_istio18454) Start(setupFn func(), runFn func(c context.Context)) {
	if setupFn != nil {
		setupFn()
	}
	go func() {
		runFn(w.ctx)
	}()
}

func (w *Worker_istio18454) Stop() {
	w.ctxCancel()
}

type Strategy_istio18454 struct {
	timer          *time.Timer
	timerFrequency time.Duration
	stateLock      sync.Mutex
	resetChan      chan struct{}
	worker         *Worker_istio18454
	startTimerFn   func()
}

func (s *Strategy_istio18454) OnChange() {
	s.stateLock.Lock()
	if s.timer != nil {
		s.stateLock.Unlock()
		s.resetChan <- struct{}{}
		return
	}
	s.startTimerFn()
	s.stateLock.Unlock()
}

func (s *Strategy_istio18454) startTimer() {
	s.timer = time.NewTimer(s.timerFrequency)
	eventLoop := func(ctx context.Context) {
		for {
			select {
			case <-s.timer.C:
			case <-s.resetChan:
				if !s.timer.Stop() {
					<-s.timer.C
				}
				s.timer.Reset(s.timerFrequency)
			case <-ctx.Done():
				s.timer.Stop()
				return
			}
		}
	}
	s.worker.Start(nil, eventLoop)
}

func (s *Strategy_istio18454) Close() {
	s.worker.Stop()
}

type Event_istio18454 int

type Processor_istio18454 struct {
	stateStrategy *Strategy_istio18454
	worker        *Worker_istio18454
	eventCh       chan Event_istio18454
}

func (p *Processor_istio18454) processEvent() {
	p.stateStrategy.OnChange()
}

func (p *Processor_istio18454) Start() {
	setupFn := func() {
		for i := 0; i < eventChCap_istio18454; i++ {
			p.eventCh <- Event_istio18454(0)
		}
	}
	runFn := func(ctx context.Context) {
		defer func() {
			p.stateStrategy.Close()
		}()
		for {
			select {
			case <-ctx.Done():
				return
			case <-p.eventCh:
				p.processEvent()
			}
		}
	}
	p.worker.Start(setupFn, runFn)
}

func (p *Processor_istio18454) Stop() {
	p.worker.Stop()
}

func NewWorker_istio18454() *Worker_istio18454 {
	worker := &Worker_istio18454{}
	worker.ctx, worker.ctxCancel = context.WithCancel(context.Background())
	return worker
}

func Istio18454() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			stateStrategy := &Strategy_istio18454{
				timerFrequency: time.Nanosecond,
				resetChan:      make(chan struct{}, 1),
				worker:         NewWorker_istio18454(),
			}
			stateStrategy.startTimerFn = stateStrategy.startTimer

			p := &Processor_istio18454{
				stateStrategy: stateStrategy,
				worker:        NewWorker_istio18454(),
				eventCh:       make(chan Event_istio18454, eventChCap_istio18454),
			}

			p.Start()
			defer p.Stop()
		}()
	}
}

95
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go
vendored
Normal file
@ -0,0 +1,95 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/10182
 * Buggy version: 4b990d128a17eea9058d28a3b3688ab8abafbd94
 * fix commit-id: 64ad3e17ad15cd0f9a4fd86706eec1c572033254
 * Flaky: 15/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes10182", Kubernetes10182)
}

type statusManager_kubernetes10182 struct {
	podStatusesLock  sync.RWMutex
	podStatusChannel chan bool
}

func (s *statusManager_kubernetes10182) Start() {
	go func() {
		for i := 0; i < 2; i++ {
			s.syncBatch()
		}
	}()
}

func (s *statusManager_kubernetes10182) syncBatch() {
	runtime.Gosched()
	<-s.podStatusChannel
	s.DeletePodStatus()
}

func (s *statusManager_kubernetes10182) DeletePodStatus() {
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
}

func (s *statusManager_kubernetes10182) SetPodStatus() {
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
	s.podStatusChannel <- true
}

func NewStatusManager_kubernetes10182() *statusManager_kubernetes10182 {
	return &statusManager_kubernetes10182{
		podStatusChannel: make(chan bool),
	}
}

// Example of deadlock trace:
//
// G1                          G2                            G3
// --------------------------------------------------------------------------------
// s.Start()
// s.syncBatch()
//                             s.SetPodStatus()
// <-s.podStatusChannel
//                             s.podStatusesLock.Lock()
//                             s.podStatusChannel <- true
//                             s.podStatusesLock.Unlock()
//                             return
// s.DeletePodStatus()
//                                                           s.podStatusesLock.Lock()
//                                                           s.podStatusChannel <- true
// s.podStatusesLock.Lock()
// -----------------------------------G1,G3 leak-------------------------------------

func Kubernetes10182() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go func() {
			s := NewStatusManager_kubernetes10182()
			go s.Start()
			go s.SetPodStatus()
			go s.SetPodStatus()
		}()
	}
}
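The trace above is another send-under-lock cycle: SetPodStatus performs its channel send while holding podStatusesLock, and the receiver needs that same lock to finish the batch. Moving the send outside the critical section removes the cycle; a sketch of the shape (illustrative; the upstream fix also restructures how status updates are queued):

func (s *statusManager_kubernetes10182) setPodStatusFixed() {
	s.podStatusesLock.Lock()
	// ... record the new status under the lock ...
	s.podStatusesLock.Unlock()
	s.podStatusChannel <- true // send with the lock released
}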

118
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go
vendored
Normal file
@ -0,0 +1,118 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes11298", Kubernetes11298)
}

type Signal_kubernetes11298 <-chan struct{}

func After_kubernetes11298(f func()) Signal_kubernetes11298 {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		if f != nil {
			f()
		}
	}()
	return Signal_kubernetes11298(ch)
}

func Until_kubernetes11298(f func(), period time.Duration, stopCh <-chan struct{}) {
	if f == nil {
		return
	}
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
		case <-time.After(period):
		}
	}
}

type notifier_kubernetes11298 struct {
	lock sync.Mutex
	cond *sync.Cond
}

// abort will be closed no matter what
func (n *notifier_kubernetes11298) serviceLoop(abort <-chan struct{}) {
	n.lock.Lock()
	defer n.lock.Unlock()
	for {
		select {
		case <-abort:
			return
		default:
			ch := After_kubernetes11298(func() {
				n.cond.Wait()
			})
			select {
			case <-abort:
				n.cond.Signal()
				<-ch
				return
			case <-ch:
			}
		}
	}
}

// abort will be closed no matter what
func Notify_kubernetes11298(abort <-chan struct{}) {
	n := &notifier_kubernetes11298{}
	n.cond = sync.NewCond(&n.lock)
	finished := After_kubernetes11298(func() {
		Until_kubernetes11298(func() {
			for {
				select {
				case <-abort:
					return
				default:
					func() {
						n.lock.Lock()
						defer n.lock.Unlock()
						n.cond.Signal()
					}()
				}
			}
		}, 0, abort)
	})
	Until_kubernetes11298(func() { n.serviceLoop(finished) }, 0, abort)
}

func Kubernetes11298() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go func() {
			done := make(chan struct{})
			notifyDone := After_kubernetes11298(func() { Notify_kubernetes11298(done) })
			go func() {
				defer close(done)
				time.Sleep(300 * time.Nanosecond)
			}()
			<-notifyDone
		}()
	}
}

166
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go
vendored
Normal file
@ -0,0 +1,166 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/13135
 * Buggy version: 6ced66249d4fd2a81e86b4a71d8df0139fe5ceae
 * fix commit-id: a12b7edc42c5c06a2e7d9f381975658692951d5a
 * Flaky: 93/100
 */
package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes13135", Kubernetes13135)
}

var (
	StopChannel_kubernetes13135 chan struct{}
)

func Util_kubernetes13135(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		func() {
			f()
		}()
		time.Sleep(period)
	}
}

type Store_kubernetes13135 interface {
	Add(obj interface{})
	Replace(obj interface{})
}

type Reflector_kubernetes13135 struct {
	store Store_kubernetes13135
}

func (r *Reflector_kubernetes13135) ListAndWatch(stopCh <-chan struct{}) error {
	r.syncWith()
	return nil
}

func NewReflector_kubernetes13135(store Store_kubernetes13135) *Reflector_kubernetes13135 {
	return &Reflector_kubernetes13135{
		store: store,
	}
}

func (r *Reflector_kubernetes13135) syncWith() {
	r.store.Replace(nil)
}

type Cacher_kubernetes13135 struct {
	sync.Mutex
	initialized sync.WaitGroup
	initOnce    sync.Once
	watchCache  *WatchCache_kubernetes13135
	reflector   *Reflector_kubernetes13135
}

func (c *Cacher_kubernetes13135) processEvent() {
	c.Lock()
	defer c.Unlock()
}

func (c *Cacher_kubernetes13135) startCaching(stopChannel <-chan struct{}) {
	c.Lock()
	for {
		err := c.reflector.ListAndWatch(stopChannel)
		if err == nil {
			break
		}
	}
}

type WatchCache_kubernetes13135 struct {
	sync.RWMutex
	onReplace func()
	onEvent   func()
}

func (w *WatchCache_kubernetes13135) SetOnEvent(onEvent func()) {
	w.Lock()
	defer w.Unlock()
	w.onEvent = onEvent
}

func (w *WatchCache_kubernetes13135) SetOnReplace(onReplace func()) {
	w.Lock()
	defer w.Unlock()
	w.onReplace = onReplace
}

func (w *WatchCache_kubernetes13135) processEvent() {
	w.Lock()
	defer w.Unlock()
	if w.onEvent != nil {
		w.onEvent()
	}
}

func (w *WatchCache_kubernetes13135) Add(obj interface{}) {
	w.processEvent()
}

func (w *WatchCache_kubernetes13135) Replace(obj interface{}) {
	w.Lock()
	defer w.Unlock()
	if w.onReplace != nil {
		w.onReplace()
	}
}

func NewCacher_kubernetes13135(stopCh <-chan struct{}) *Cacher_kubernetes13135 {
	watchCache := &WatchCache_kubernetes13135{}
	cacher := &Cacher_kubernetes13135{
		initialized: sync.WaitGroup{},
		watchCache:  watchCache,
		reflector:   NewReflector_kubernetes13135(watchCache),
	}
	cacher.initialized.Add(1)
	watchCache.SetOnReplace(func() {
		cacher.initOnce.Do(func() { cacher.initialized.Done() })
		cacher.Unlock()
	})
	watchCache.SetOnEvent(cacher.processEvent)
	go Util_kubernetes13135(func() { cacher.startCaching(stopCh) }, 0, stopCh) // G2
	cacher.initialized.Wait()
	return cacher
}

func Kubernetes13135() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	StopChannel_kubernetes13135 = make(chan struct{})
	for i := 0; i < 50; i++ {
		go func() {
			// Should create a local channel. Using a single global channel
			// concurrently will cause a deadlock which does not actually exist
			// in the original microbenchmark.
			StopChannel_kubernetes13135 := make(chan struct{})

			c := NewCacher_kubernetes13135(StopChannel_kubernetes13135) // G1
			go c.watchCache.Add(nil)                                    // G3
			go close(StopChannel_kubernetes13135)
		}()
	}
}

100
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go
vendored
Normal file
@ -0,0 +1,100 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/1321
 * Buggy version: 9cd0fc70f1ca852c903b18b0933991036b3b2fa1
 * fix commit-id: 435e0b73bb99862f9dedf56a50260ff3dfef14ff
 * Flaky: 1/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes1321", Kubernetes1321)
}

type muxWatcher_kubernetes1321 struct {
	result chan struct{}
	m      *Mux_kubernetes1321
	id     int64
}

func (mw *muxWatcher_kubernetes1321) Stop() {
	mw.m.stopWatching(mw.id)
}

type Mux_kubernetes1321 struct {
	lock     sync.Mutex
	watchers map[int64]*muxWatcher_kubernetes1321
}

func NewMux_kubernetes1321() *Mux_kubernetes1321 {
	m := &Mux_kubernetes1321{
		watchers: map[int64]*muxWatcher_kubernetes1321{},
	}
	go m.loop() // G2
	return m
}

func (m *Mux_kubernetes1321) Watch() *muxWatcher_kubernetes1321 {
	mw := &muxWatcher_kubernetes1321{
		result: make(chan struct{}),
		m:      m,
		id:     int64(len(m.watchers)),
	}
	m.watchers[mw.id] = mw
	runtime.Gosched()
	return mw
}

func (m *Mux_kubernetes1321) loop() {
	for i := 0; i < 100; i++ {
		m.distribute()
	}
}

func (m *Mux_kubernetes1321) distribute() {
	m.lock.Lock()
	defer m.lock.Unlock()
	for _, w := range m.watchers {
		w.result <- struct{}{}
		runtime.Gosched()
	}
}

func (m *Mux_kubernetes1321) stopWatching(id int64) {
	m.lock.Lock()
	defer m.lock.Unlock()
	w, ok := m.watchers[id]
	if !ok {
		return
	}
	delete(m.watchers, id)
	close(w.result)
}

func testMuxWatcherClose_kubernetes1321() {
	m := NewMux_kubernetes1321()
	m.watchers[m.Watch().id].Stop()
}

func Kubernetes1321() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 1000; i++ {
		go testMuxWatcherClose_kubernetes1321() // G1
	}
}

72
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go
vendored
Normal file
@ -0,0 +1,72 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/25331
 * Buggy version: 5dd087040bb13434f1ddf2f0693d0203c30f28cb
 * fix commit-id: 97f4647dc3d8cf46c2b66b89a31c758a6edfb57c
 * Flaky: 100/100
 */
package main

import (
	"context"
	"errors"
	"os"
	"runtime"
	"runtime/pprof"
)

func init() {
	register("Kubernetes25331", Kubernetes25331)
}

type watchChan_kubernetes25331 struct {
	ctx        context.Context
	cancel     context.CancelFunc
	resultChan chan bool
	errChan    chan error
}

func (wc *watchChan_kubernetes25331) Stop() {
	wc.errChan <- errors.New("Error")
	wc.cancel()
}

func (wc *watchChan_kubernetes25331) run() {
	select {
	case err := <-wc.errChan:
		errResult := len(err.Error()) != 0
		wc.cancel() // Removed in fix
		wc.resultChan <- errResult
	case <-wc.ctx.Done():
	}
}

func NewWatchChan_kubernetes25331() *watchChan_kubernetes25331 {
	ctx, cancel := context.WithCancel(context.Background())
	return &watchChan_kubernetes25331{
		ctx:        ctx,
		cancel:     cancel,
		resultChan: make(chan bool),
		errChan:    make(chan error),
	}
}

func Kubernetes25331() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		wc := NewWatchChan_kubernetes25331()
		go wc.run()  // G1
		go wc.Stop() // G2
	}()
}
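run can leak at resultChan <- errResult when no one ever consumes the result after Stop has fired. Pairing every send with ctx.Done gives the goroutine a guaranteed exit; a defensive sketch against the type above (hypothetical method name):

func (wc *watchChan_kubernetes25331) runFixed() {
	select {
	case err := <-wc.errChan:
		select {
		case wc.resultChan <- len(err.Error()) != 0:
		case <-wc.ctx.Done(): // always able to retire
		}
	case <-wc.ctx.Done():
	}
}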

87
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go
vendored
Normal file
@ -0,0 +1,87 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes26980", Kubernetes26980)
}

type processorListener_kubernetes26980 struct {
	lock sync.RWMutex
	cond sync.Cond

	pendingNotifications []interface{}
}

func (p *processorListener_kubernetes26980) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
}

func (p *processorListener_kubernetes26980) pop(stopCh <-chan struct{}) {
	p.lock.Lock()
	runtime.Gosched()
	defer p.lock.Unlock()
	for {
		for len(p.pendingNotifications) == 0 {
			select {
			case <-stopCh:
				return
			default:
			}
			p.cond.Wait()
		}
		select {
		case <-stopCh:
			return
		}
	}
}

func newProcessListener_kubernetes26980() *processorListener_kubernetes26980 {
	ret := &processorListener_kubernetes26980{
		pendingNotifications: []interface{}{},
	}
	ret.cond.L = &ret.lock
	return ret
}

func Kubernetes26980() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 3000; i++ {
		go func() {
			pl := newProcessListener_kubernetes26980()
			stopCh := make(chan struct{})
			defer close(stopCh)
			pl.add(1)
			runtime.Gosched()
			go pl.pop(stopCh)

			resultCh := make(chan struct{})
			go func() {
				pl.lock.Lock()
				close(resultCh)
			}()
			runtime.Gosched()
			<-resultCh
			pl.lock.Unlock()
		}()
	}
}
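pop parks inside a select whose only case is stopCh while still holding p.lock, so the helper goroutine's Lock never succeeds; more fundamentally, sync.Cond.Wait cannot be composed with a stop channel. The informer code later moved toward channel-based notification, which selects cleanly; a sketch of that shape (hypothetical type, not the actual client-go listener):

type listener struct {
	notifyCh chan interface{}
}

func (l *listener) pop(stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		case n := <-l.notifyCh:
			_ = n // process the notification
		}
	}
}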

223
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go
vendored
Normal file
@ -0,0 +1,223 @@

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes30872", Kubernetes30872)
}

type PopProcessFunc_kubernetes30872 func()

type ProcessFunc_kubernetes30872 func()

func Util_kubernetes30872(f func(), stopCh <-chan struct{}) {
	JitterUntil_kubernetes30872(f, stopCh)
}

func JitterUntil_kubernetes30872(f func(), stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		func() {
			f()
		}()
	}
}

type Queue_kubernetes30872 interface {
	HasSynced()
	Pop(PopProcessFunc_kubernetes30872)
}

type Config_kubernetes30872 struct {
	Queue   Queue_kubernetes30872
	Process ProcessFunc_kubernetes30872
}

type Controller_kubernetes30872 struct {
	config Config_kubernetes30872
}

func (c *Controller_kubernetes30872) Run(stopCh <-chan struct{}) {
	Util_kubernetes30872(c.processLoop, stopCh)
}

func (c *Controller_kubernetes30872) HasSynced() {
	c.config.Queue.HasSynced()
}

func (c *Controller_kubernetes30872) processLoop() {
	c.config.Queue.Pop(PopProcessFunc_kubernetes30872(c.config.Process))
}

type ControllerInterface_kubernetes30872 interface {
	Run(<-chan struct{})
	HasSynced()
}

type ResourceEventHandler_kubernetes30872 interface {
	OnAdd()
}

type ResourceEventHandlerFuncs_kubernetes30872 struct {
	AddFunc func()
}

func (r ResourceEventHandlerFuncs_kubernetes30872) OnAdd() {
	if r.AddFunc != nil {
		r.AddFunc()
	}
}

type informer_kubernetes30872 struct {
	controller ControllerInterface_kubernetes30872

	stopChan chan struct{}
}

type federatedInformerImpl_kubernetes30872 struct {
	sync.Mutex
	clusterInformer informer_kubernetes30872
}

func (f *federatedInformerImpl_kubernetes30872) ClustersSynced() {
	f.Lock() // L1
	defer f.Unlock()
	f.clusterInformer.controller.HasSynced()
}

func (f *federatedInformerImpl_kubernetes30872) addCluster() {
	f.Lock() // L1
	defer f.Unlock()
}

func (f *federatedInformerImpl_kubernetes30872) Start() {
	f.Lock() // L1
	defer f.Unlock()

	f.clusterInformer.stopChan = make(chan struct{})
	go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) // G2
	runtime.Gosched()
}

func (f *federatedInformerImpl_kubernetes30872) Stop() {
	f.Lock() // L1
	defer f.Unlock()
	close(f.clusterInformer.stopChan)
}

type DelayingDeliverer_kubernetes30872 struct{}

func (d *DelayingDeliverer_kubernetes30872) StartWithHandler(handler func()) {
	go func() { // G4
		handler()
	}()
}

type FederationView_kubernetes30872 interface {
	ClustersSynced()
}

type FederatedInformer_kubernetes30872 interface {
	FederationView_kubernetes30872
	Start()
	Stop()
}

type NamespaceController_kubernetes30872 struct {
	namespaceDeliverer         *DelayingDeliverer_kubernetes30872
	namespaceFederatedInformer FederatedInformer_kubernetes30872
}

func (nc *NamespaceController_kubernetes30872) isSynced() {
	nc.namespaceFederatedInformer.ClustersSynced()
}

func (nc *NamespaceController_kubernetes30872) reconcileNamespace() {
	nc.isSynced()
}

func (nc *NamespaceController_kubernetes30872) Run(stopChan <-chan struct{}) {
	nc.namespaceFederatedInformer.Start()
	go func() { // G3
		<-stopChan
		nc.namespaceFederatedInformer.Stop()
	}()
	nc.namespaceDeliverer.StartWithHandler(func() {
		nc.reconcileNamespace()
	})
}

type DeltaFIFO_kubernetes30872 struct {
	lock sync.RWMutex
}

func (f *DeltaFIFO_kubernetes30872) HasSynced() {
	f.lock.Lock() // L2
	defer f.lock.Unlock()
}

func (f *DeltaFIFO_kubernetes30872) Pop(process PopProcessFunc_kubernetes30872) {
	f.lock.Lock() // L2
	defer f.lock.Unlock()
	process()
}

func NewFederatedInformer_kubernetes30872() FederatedInformer_kubernetes30872 {
	federatedInformer := &federatedInformerImpl_kubernetes30872{}
	federatedInformer.clusterInformer.controller = NewInformer_kubernetes30872(
		ResourceEventHandlerFuncs_kubernetes30872{
			AddFunc: func() {
				federatedInformer.addCluster()
			},
		})
	return federatedInformer
}

func NewInformer_kubernetes30872(h ResourceEventHandler_kubernetes30872) *Controller_kubernetes30872 {
	fifo := &DeltaFIFO_kubernetes30872{}
	cfg := &Config_kubernetes30872{
		Queue: fifo,
		Process: func() {
			h.OnAdd()
		},
	}
	return &Controller_kubernetes30872{config: *cfg}
}

func NewNamespaceController_kubernetes30872() *NamespaceController_kubernetes30872 {
	nc := &NamespaceController_kubernetes30872{}
	nc.namespaceDeliverer = &DelayingDeliverer_kubernetes30872{}
	nc.namespaceFederatedInformer = NewFederatedInformer_kubernetes30872()
	return nc
}
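
/// One possible leaky interleaving (sketch; L1/L2 and G2/G4 refer to the
/// comments in this file):
///
/// G2 (controller.Run)                     G4 (deliverer handler)
/// fifo.Pop(): f.lock.Lock() [L2]
///                                         ClustersSynced(): f.Lock() [L1]
/// process() -> OnAdd() -> addCluster()
/// f.Lock() [L1, blocks]
///                                         HasSynced(): f.lock.Lock() [L2, blocks]
/// ------------------- G2 and G4 deadlock on L1/L2 -------------------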
func Kubernetes30872() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() { // G1
			namespaceController := NewNamespaceController_kubernetes30872()
			stop := make(chan struct{})
			namespaceController.Run(stop)
			close(stop)
		}()
	}
}
|
83
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Kubernetes38669", Kubernetes38669)
}

type Event_kubernetes38669 int
type watchCacheEvent_kubernetes38669 int

type cacheWatcher_kubernetes38669 struct {
	sync.Mutex
	input   chan watchCacheEvent_kubernetes38669
	result  chan Event_kubernetes38669
	stopped bool
}

func (c *cacheWatcher_kubernetes38669) process(initEvents []watchCacheEvent_kubernetes38669) {
	for _, event := range initEvents {
		c.sendWatchCacheEvent(&event)
	}
	defer close(c.result)
	defer c.Stop()
	for {
		_, ok := <-c.input
		if !ok {
			return
		}
	}
}

func (c *cacheWatcher_kubernetes38669) sendWatchCacheEvent(event *watchCacheEvent_kubernetes38669) {
	c.result <- Event_kubernetes38669(*event)
}

func (c *cacheWatcher_kubernetes38669) Stop() {
	c.stop()
}

func (c *cacheWatcher_kubernetes38669) stop() {
	c.Lock()
	defer c.Unlock()
	if !c.stopped {
		c.stopped = true
		close(c.input)
	}
}

func newCacheWatcher_kubernetes38669(chanSize int, initEvents []watchCacheEvent_kubernetes38669) *cacheWatcher_kubernetes38669 {
	watcher := &cacheWatcher_kubernetes38669{
		input:   make(chan watchCacheEvent_kubernetes38669, chanSize),
		result:  make(chan Event_kubernetes38669, chanSize),
		stopped: false,
	}
	go watcher.process(initEvents)
	return watcher
}
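
/// One possible leak (sketch): c.result is unbuffered and never drained, so
/// watcher.process blocks in sendWatchCacheEvent on the first init event and
/// never reaches the loop that would observe close(c.input).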
func Kubernetes38669() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		initEvents := []watchCacheEvent_kubernetes38669{1, 2}
		w := newCacheWatcher_kubernetes38669(0, initEvents)
		w.Stop()
	}()
}
|
68
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/5316
 * Buggy version: c868b0bbf09128960bc7c4ada1a77347a464d876
 * fix commit-id: cc3a433a7abc89d2f766d4c87eaae9448e3dc091
 * Flaky: 100/100
 */

package main

import (
	"errors"
	"math/rand"
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func init() {
	register("Kubernetes5316", Kubernetes5316)
}

func finishRequest_kubernetes5316(timeout time.Duration, fn func() error) {
	ch := make(chan bool)
	errCh := make(chan error)
	go func() { // G2
		if err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- true
		}
	}()

	select {
	case <-ch:
	case <-errCh:
	case <-time.After(timeout):
	}
}
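
/// One possible leak (sketch): fn sleeps past the one-microsecond timeout, so
/// finishRequest returns via the time.After case; G2 then blocks forever
/// sending on the unbuffered ch or errCh, which no longer have a receiver.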
func Kubernetes5316() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Wait a bit because the child goroutine relies on timed operations.
		time.Sleep(100 * time.Millisecond)

		// Yield several times to allow the child goroutine to run
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		fn := func() error {
			time.Sleep(2 * time.Millisecond)
			if rand.Intn(10) > 5 {
				return errors.New("Error")
			}
			return nil
		}
		go finishRequest_kubernetes5316(time.Microsecond, fn) // G1
	}()
}
|
108
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go
vendored
Normal file
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Tag: Reproduce misbehavior
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/58107
 * Buggy version: 2f17d782eb2772d6401da7ddced9ac90656a7a79
 * fix commit-id: 010a127314a935d8d038f8dd4559fc5b249813e4
 * Flaky: 53/100
 */

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes58107", Kubernetes58107)
}

type RateLimitingInterface_kubernetes58107 interface {
	Get()
	Put()
}

type Type_kubernetes58107 struct {
	cond *sync.Cond
}

func (q *Type_kubernetes58107) Get() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.cond.Wait()
}

func (q *Type_kubernetes58107) Put() {
	q.cond.Signal()
}

type ResourceQuotaController_kubernetes58107 struct {
	workerLock        sync.RWMutex
	queue             RateLimitingInterface_kubernetes58107
	missingUsageQueue RateLimitingInterface_kubernetes58107
}

func (rq *ResourceQuotaController_kubernetes58107) worker(queue RateLimitingInterface_kubernetes58107, _ string) {
	workFunc := func() bool {
		rq.workerLock.RLock()
		defer rq.workerLock.RUnlock()
		queue.Get()
		return true
	}
	for {
		if quit := workFunc(); quit {
			return
		}
	}
}

func (rq *ResourceQuotaController_kubernetes58107) Run() {
	go rq.worker(rq.queue, "G1")             // G3
	go rq.worker(rq.missingUsageQueue, "G2") // G4
}

func (rq *ResourceQuotaController_kubernetes58107) Sync() {
	for i := 0; i < 100000; i++ {
		rq.workerLock.Lock()
		runtime.Gosched()
		rq.workerLock.Unlock()
	}
}

func (rq *ResourceQuotaController_kubernetes58107) HelperSignals() {
	for i := 0; i < 100000; i++ {
		rq.queue.Put()
		rq.missingUsageQueue.Put()
	}
}

func startResourceQuotaController_kubernetes58107() {
	resourceQuotaController := &ResourceQuotaController_kubernetes58107{
		queue:             &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
		missingUsageQueue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
	}

	go resourceQuotaController.Run()  // G2
	go resourceQuotaController.Sync() // G5
	resourceQuotaController.HelperSignals()
}
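
/// One possible leak (sketch): each worker blocks in queue.Get (cond.Wait)
/// while still holding workerLock.RLock. Once HelperSignals has issued its
/// last Put, the workers are never signalled again and keep their read
/// locks, so Sync's workerLock.Lock also blocks forever.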
func Kubernetes58107() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(1000 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go startResourceQuotaController_kubernetes58107() // G1
	}
}
|
120
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go
vendored
Normal file
|
@ -0,0 +1,120 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/62464
 * Buggy version: a048ca888ad27367b1a7b7377c67658920adbf5d
 * fix commit-id: c1b19fce903675b82e9fdd1befcc5f5d658bfe78
 * Flaky: 8/100
 */

package main

import (
	"math/rand"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes62464", Kubernetes62464)
}

type State_kubernetes62464 interface {
	GetCPUSetOrDefault()
	GetCPUSet() bool
	GetDefaultCPUSet()
	SetDefaultCPUSet()
}

type stateMemory_kubernetes62464 struct {
	sync.RWMutex
}

func (s *stateMemory_kubernetes62464) GetCPUSetOrDefault() {
	s.RLock()
	defer s.RUnlock()
	if ok := s.GetCPUSet(); ok {
		return
	}
	s.GetDefaultCPUSet()
}

func (s *stateMemory_kubernetes62464) GetCPUSet() bool {
	runtime.Gosched()
	s.RLock()
	defer s.RUnlock()

	if rand.Intn(10) > 5 {
		return true
	}
	return false
}

func (s *stateMemory_kubernetes62464) GetDefaultCPUSet() {
	s.RLock()
	defer s.RUnlock()
}

func (s *stateMemory_kubernetes62464) SetDefaultCPUSet() {
	s.Lock()
	runtime.Gosched()
	defer s.Unlock()
}

type staticPolicy_kubernetes62464 struct{}

func (p *staticPolicy_kubernetes62464) RemoveContainer(s State_kubernetes62464) {
	s.GetDefaultCPUSet()
	s.SetDefaultCPUSet()
}

type manager_kubernetes62464 struct {
	state *stateMemory_kubernetes62464
}

func (m *manager_kubernetes62464) reconcileState() {
	m.state.GetCPUSetOrDefault()
}

func NewPolicyAndManager_kubernetes62464() (*staticPolicy_kubernetes62464, *manager_kubernetes62464) {
	s := &stateMemory_kubernetes62464{}
	m := &manager_kubernetes62464{s}
	p := &staticPolicy_kubernetes62464{}
	return p, m
}

///
/// G1                                  G2
/// m.reconcileState()
/// m.state.GetCPUSetOrDefault()
/// s.RLock()
/// s.GetCPUSet()
///                                     p.RemoveContainer()
///                                     s.GetDefaultCPUSet()
///                                     s.SetDefaultCPUSet()
///                                     s.Lock()
/// s.RLock()
/// ---------------------G1,G2 deadlock---------------------
///

func Kubernetes62464() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go func() {
			p, m := NewPolicyAndManager_kubernetes62464()
			go m.reconcileState()
			go p.RemoveContainer(m.state)
		}()
	}
}
|
86
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: kubernetes
 * Issue or PR : https://github.com/kubernetes/kubernetes/pull/6632
 * Buggy version: e597b41d939573502c8dda1dde7bf3439325fb5d
 * fix commit-id: 82afb7ab1fe12cf2efceede2322d082eaf5d5adc
 * Flaky: 4/100
 */
package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Kubernetes6632", Kubernetes6632)
}

type Connection_kubernetes6632 struct {
	closeChan chan bool
}

type idleAwareFramer_kubernetes6632 struct {
	resetChan chan bool
	writeLock sync.Mutex
	conn      *Connection_kubernetes6632
}

func (i *idleAwareFramer_kubernetes6632) monitor() {
	var resetChan = i.resetChan
Loop:
	for {
		select {
		case <-i.conn.closeChan:
			i.writeLock.Lock()
			close(resetChan)
			i.resetChan = nil
			i.writeLock.Unlock()
			break Loop
		}
	}
}

func (i *idleAwareFramer_kubernetes6632) WriteFrame() {
	i.writeLock.Lock()
	defer i.writeLock.Unlock()
	if i.resetChan == nil {
		return
	}
	i.resetChan <- true
}

func NewIdleAwareFramer_kubernetes6632() *idleAwareFramer_kubernetes6632 {
	return &idleAwareFramer_kubernetes6632{
		resetChan: make(chan bool),
		conn: &Connection_kubernetes6632{
			closeChan: make(chan bool),
		},
	}
}
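
/// One possible leaky interleaving (sketch):
///
/// G2 (WriteFrame)                            G1 (monitor)
/// writeLock.Lock()
/// resetChan <- true [blocks: no receiver]
///                                            <-closeChan
///                                            writeLock.Lock() [blocks]
/// ------------------- G1 and G2 deadlock -------------------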
func Kubernetes6632() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			i := NewIdleAwareFramer_kubernetes6632()

			go func() { // helper goroutine
				i.conn.closeChan <- true
			}()
			go i.monitor()    // G1
			go i.WriteFrame() // G2
		}()
	}
}
|
97
src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime/pprof"
	"time"
)

func init() {
	register("Kubernetes70277", Kubernetes70277)
}

type WaitFunc_kubernetes70277 func(done <-chan struct{}) <-chan struct{}

type ConditionFunc_kubernetes70277 func() (done bool, err error)

func WaitFor_kubernetes70277(wait WaitFunc_kubernetes70277, fn ConditionFunc_kubernetes70277, done <-chan struct{}) error {
	c := wait(done)
	for {
		_, open := <-c
		ok, err := fn()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if !open {
			break
		}
	}
	return nil
}

func poller_kubernetes70277(interval, timeout time.Duration) WaitFunc_kubernetes70277 {
	return WaitFunc_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
		ch := make(chan struct{})
		go func() {
			defer close(ch)

			tick := time.NewTicker(interval)
			defer tick.Stop()

			var after <-chan time.Time
			if timeout != 0 {
				timer := time.NewTimer(timeout)
				after = timer.C
				defer timer.Stop()
			}
			for {
				select {
				case <-tick.C:
					select {
					case ch <- struct{}{}:
					default:
					}
				case <-after:
					return
				case <-done:
					return
				}
			}
		}()

		return ch
	})
}
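
/// One possible leak (sketch): each goroutine below captures its own stopCh
/// as doneCh and blocks on <-doneCh, but the deferred close(stopCh) only runs
/// after the goroutine returns, so the receive can never be satisfied.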
func Kubernetes70277() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 1000; i++ {
		go func() {
			stopCh := make(chan struct{})
			defer close(stopCh)
			waitFunc := poller_kubernetes70277(time.Millisecond, 80*time.Millisecond)
			var doneCh <-chan struct{}

			WaitFor_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
				doneCh = done
				return waitFunc(done)
			}, func() (bool, error) {
				time.Sleep(10 * time.Millisecond)
				return true, nil
			}, stopCh)

			<-doneCh // block here
		}()
	}
}
|
39
src/runtime/testdata/testgoroutineleakprofile/goker/main.go
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "os"

// The number of times the main (profiling) goroutine should yield
// in order to allow the leaking goroutines to get stuck.
const yieldCount = 10

var cmds = map[string]func(){}

func register(name string, f func()) {
	if cmds[name] != nil {
		panic("duplicate registration: " + name)
	}
	cmds[name] = f
}

func registerInit(name string, f func()) {
	if len(os.Args) >= 2 && os.Args[1] == name {
		f()
	}
}

func main() {
	if len(os.Args) < 2 {
		println("usage: " + os.Args[0] + " name-of-test")
		return
	}
	f := cmds[os.Args[1]]
	if f == nil {
		println("unknown function: " + os.Args[1])
		return
	}
	f()
}
|
68
src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/17176
 * Buggy version: d295dc66521e2734390473ec1f1da8a73ad3288a
 * fix commit-id: 2f16895ee94848e2d8ad72bc01968b4c88d84cb8
 * Flaky: 100/100
 */
package main

import (
	"errors"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby17176", Moby17176)
}

type DeviceSet_moby17176 struct {
	sync.Mutex
	nrDeletedDevices int
}

func (devices *DeviceSet_moby17176) cleanupDeletedDevices() error {
	devices.Lock()
	if devices.nrDeletedDevices == 0 {
		/// Missing devices.Unlock()
		return nil
	}
	devices.Unlock()
	return errors.New("Error")
}

func testDevmapperLockReleasedDeviceDeletion_moby17176() {
	ds := &DeviceSet_moby17176{
		nrDeletedDevices: 0,
	}
	ds.cleanupDeletedDevices()
	doneChan := make(chan bool)
	go func() {
		ds.Lock()
		defer ds.Unlock()
		doneChan <- true
	}()

	select {
	case <-time.After(time.Millisecond):
	case <-doneChan:
	}
}
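
/// One possible leak (sketch): cleanupDeletedDevices returns with the
/// DeviceSet still locked (the missing Unlock above), so the spawned
/// goroutine blocks forever in ds.Lock() while its parent escapes through
/// the time.After case.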
func Moby17176() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go testDevmapperLockReleasedDeviceDeletion_moby17176()
	}
}
|
146
src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go
vendored
Normal file
|
@ -0,0 +1,146 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/21233
 * Buggy version: cc12d2bfaae135e63b1f962ad80e6943dd995337
 * fix commit-id: 2f4aa9658408ac72a598363c6e22eadf93dbb8a7
 * Flaky: 100/100
 */
package main

import (
	"math/rand"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby21233", Moby21233)
}

type Progress_moby21233 struct{}

type Output_moby21233 interface {
	WriteProgress(Progress_moby21233) error
}

type chanOutput_moby21233 chan<- Progress_moby21233

type TransferManager_moby21233 struct {
	mu sync.Mutex
}

type Transfer_moby21233 struct {
	mu sync.Mutex
}

type Watcher_moby21233 struct {
	signalChan  chan struct{}
	releaseChan chan struct{}
	running     chan struct{}
}

func ChanOutput_moby21233(progressChan chan<- Progress_moby21233) Output_moby21233 {
	return chanOutput_moby21233(progressChan)
}
func (out chanOutput_moby21233) WriteProgress(p Progress_moby21233) error {
	out <- p
	return nil
}
func NewTransferManager_moby21233() *TransferManager_moby21233 {
	return &TransferManager_moby21233{}
}
func NewTransfer_moby21233() *Transfer_moby21233 {
	return &Transfer_moby21233{}
}
func (t *Transfer_moby21233) Release(watcher *Watcher_moby21233) {
	t.mu.Lock()
	t.mu.Unlock()
	close(watcher.releaseChan)
	<-watcher.running
}
func (t *Transfer_moby21233) Watch(progressOutput Output_moby21233) *Watcher_moby21233 {
	t.mu.Lock()
	defer t.mu.Unlock()
	lastProgress := Progress_moby21233{}
	w := &Watcher_moby21233{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}
	go func() { // G2
		defer func() {
			close(w.running)
		}()
		done := false
		for {
			t.mu.Lock()
			t.mu.Unlock()
			if rand.Int31n(2) >= 1 {
				progressOutput.WriteProgress(lastProgress)
			}
			if done {
				return
			}
			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
			}
		}
	}()
	return w
}
func (tm *TransferManager_moby21233) Transfer(progressOutput Output_moby21233) (*Transfer_moby21233, *Watcher_moby21233) {
	tm.mu.Lock()
	defer tm.mu.Unlock()
	t := NewTransfer_moby21233()
	return t, t.Watch(progressOutput)
}

func testTransfer_moby21233() { // G1
	tm := NewTransferManager_moby21233()
	progressChan := make(chan Progress_moby21233)
	progressDone := make(chan struct{})
	go func() { // G3
		time.Sleep(1 * time.Millisecond)
		for p := range progressChan { /// Chan consumer
			if rand.Int31n(2) >= 1 {
				return
			}
			_ = p
		}
		close(progressDone)
	}()
	time.Sleep(1 * time.Millisecond)
	ids := []string{"id1", "id2", "id3"}
	xrefs := make([]*Transfer_moby21233, len(ids))
	watchers := make([]*Watcher_moby21233, len(ids))
	for i := range ids {
		xrefs[i], watchers[i] = tm.Transfer(ChanOutput_moby21233(progressChan)) /// Chan producer
		time.Sleep(2 * time.Millisecond)
	}

	for i := range xrefs {
		xrefs[i].Release(watchers[i])
	}

	close(progressChan)
	<-progressDone
}
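
/// One possible leak (sketch): the consumer G3 may return early without
/// draining progressChan; a watch goroutine G2 then blocks in WriteProgress
/// on the unbuffered channel, close(w.running) never runs, and G1 blocks
/// forever in Release on <-watcher.running.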
func Moby21233() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go testTransfer_moby21233() // G1
	}
}
|
59
src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/25384
 * Buggy version: 58befe3081726ef74ea09198cd9488fb42c51f51
 * fix commit-id: 42360d164b9f25fb4b150ef066fcf57fa39559a7
 * Flaky: 100/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Moby25348", Moby25348)
}

type plugin_moby25348 struct{}

type Manager_moby25348 struct {
	plugins []*plugin_moby25348
}

func (pm *Manager_moby25348) init() {
	var group sync.WaitGroup
	group.Add(len(pm.plugins))
	for _, p := range pm.plugins {
		go func(p *plugin_moby25348) {
			defer group.Done()
		}(p)
		group.Wait() // Block here
	}
}
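
/// One possible leak (sketch): group.Wait() sits inside the loop, so it runs
/// after only the first of len(pm.plugins) goroutines has been started; the
/// WaitGroup counter can never reach zero and pm.init blocks forever.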
func Moby25348() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()
	go func() {
		p1 := &plugin_moby25348{}
		p2 := &plugin_moby25348{}
		pm := &Manager_moby25348{
			plugins: []*plugin_moby25348{p1, p2},
		}
		go pm.init()
	}()
}
|
242
src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/27782
 * Buggy version: 18768fdc2e76ec6c600c8ab57d2d487ee7877794
 * fix commit-id: a69a59ffc7e3d028a72d1195c2c1535f447eaa84
 * Flaky: 2/100
 */
package main

import (
	"errors"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby27782", Moby27782)
}

type Event_moby27782 struct {
	Op Op_moby27782
}

type Op_moby27782 uint32

const (
	Create_moby27782 Op_moby27782 = 1 << iota
	Write_moby27782
	Remove_moby27782
	Rename_moby27782
	Chmod_moby27782
)

func newEvent(op Op_moby27782) Event_moby27782 {
	return Event_moby27782{op}
}

func (e *Event_moby27782) ignoreLinux(w *Watcher_moby27782) bool {
	if e.Op != Write_moby27782 {
		w.mu.Lock()
		defer w.mu.Unlock()
		w.cv.Broadcast()
		return true
	}
	runtime.Gosched()
	return false
}

type Watcher_moby27782 struct {
	Events chan Event_moby27782
	mu     sync.Mutex // L1
	cv     *sync.Cond // C1
	done   chan struct{}
}

func NewWatcher_moby27782() *Watcher_moby27782 {
	w := &Watcher_moby27782{
		Events: make(chan Event_moby27782),
		done:   make(chan struct{}),
	}
	w.cv = sync.NewCond(&w.mu)
	go w.readEvents() // G3
	return w
}

func (w *Watcher_moby27782) readEvents() {
	defer close(w.Events)
	for {
		if w.isClosed() {
			return
		}
		event := newEvent(Write_moby27782) // MODIFY event
		if !event.ignoreLinux(w) {
			runtime.Gosched()
			select {
			case w.Events <- event:
			case <-w.done:
				return
			}
		}
	}
}

func (w *Watcher_moby27782) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

func (w *Watcher_moby27782) Close() {
	if w.isClosed() {
		return
	}
	close(w.done)
}

func (w *Watcher_moby27782) Remove() {
	w.mu.Lock()
	defer w.mu.Unlock()
	exists := true
	for exists {
		w.cv.Wait()
		runtime.Gosched()
	}
}

type FileWatcher_moby27782 interface {
	Events() <-chan Event_moby27782
	Remove()
	Close()
}

func New_moby27782() FileWatcher_moby27782 {
	return NewEventWatcher_moby27782()
}

func NewEventWatcher_moby27782() FileWatcher_moby27782 {
	return &fsNotifyWatcher_moby27782{NewWatcher_moby27782()}
}

type fsNotifyWatcher_moby27782 struct {
	*Watcher_moby27782
}

func (w *fsNotifyWatcher_moby27782) Events() <-chan Event_moby27782 {
	return w.Watcher_moby27782.Events
}

func watchFile_moby27782() FileWatcher_moby27782 {
	fileWatcher := New_moby27782()
	return fileWatcher
}

type LogWatcher_moby27782 struct {
	closeOnce     sync.Once
	closeNotifier chan struct{}
}

func (w *LogWatcher_moby27782) Close() {
	w.closeOnce.Do(func() {
		close(w.closeNotifier)
	})
}

func (w *LogWatcher_moby27782) WatchClose() <-chan struct{} {
	return w.closeNotifier
}

func NewLogWatcher_moby27782() *LogWatcher_moby27782 {
	return &LogWatcher_moby27782{
		closeNotifier: make(chan struct{}),
	}
}

func followLogs_moby27782(logWatcher *LogWatcher_moby27782) {
	fileWatcher := watchFile_moby27782()
	defer func() {
		fileWatcher.Close()
	}()
	waitRead := func() {
		runtime.Gosched()
		select {
		case <-fileWatcher.Events():
		case <-logWatcher.WatchClose():
			fileWatcher.Remove()
			return
		}
	}
	handleDecodeErr := func() {
		waitRead()
	}
	handleDecodeErr()
}

type Container_moby27782 struct {
	LogDriver *JSONFileLogger_moby27782
}

func (container *Container_moby27782) InitializeStdio() {
	if err := container.startLogging(); err != nil {
		container.Reset()
	}
}

func (container *Container_moby27782) startLogging() error {
	l := &JSONFileLogger_moby27782{
		readers: make(map[*LogWatcher_moby27782]struct{}),
	}
	container.LogDriver = l
	l.ReadLogs()
	return errors.New("Some error")
}

func (container *Container_moby27782) Reset() {
	if container.LogDriver != nil {
		container.LogDriver.Close()
	}
}

type JSONFileLogger_moby27782 struct {
	readers map[*LogWatcher_moby27782]struct{}
}

func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 {
	logWatcher := NewLogWatcher_moby27782()
	go l.readLogs(logWatcher) // G2
	return logWatcher
}

func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) {
	l.readers[logWatcher] = struct{}{}
	followLogs_moby27782(logWatcher)
}

func (l *JSONFileLogger_moby27782) Close() {
	for r := range l.readers {
		r.Close()
		delete(l.readers, r)
	}
}
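
/// One possible leaky interleaving (sketch): readEvents only produces Write
/// events, and ignoreLinux broadcasts w.cv only for non-Write events. Once
/// the LogWatcher is closed, followLogs calls fileWatcher.Remove, which
/// blocks in w.cv.Wait with nobody left to Broadcast; its deferred
/// fileWatcher.Close never runs, so readEvents stays blocked sending on
/// w.Events as well.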
func Moby27782() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 10000; i++ {
		go (&Container_moby27782{}).InitializeStdio() // G1
	}
}
|
125
src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go
vendored
Normal file
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/28462
 * Buggy version: b184bdabf7a01c4b802304ac64ac133743c484be
 * fix commit-id: 89b123473774248fc3a0356dd3ce5b116cc69b29
 * Flaky: 69/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby28462", Moby28462)
}

type State_moby28462 struct {
	Health *Health_moby28462
}

type Container_moby28462 struct {
	sync.Mutex
	State *State_moby28462
}

func (ctr *Container_moby28462) start() {
	go ctr.waitExit()
}
func (ctr *Container_moby28462) waitExit() {

}

type Store_moby28462 struct {
	ctr *Container_moby28462
}

func (s *Store_moby28462) Get() *Container_moby28462 {
	return s.ctr
}

type Daemon_moby28462 struct {
	containers Store_moby28462
}

func (d *Daemon_moby28462) StateChanged() {
	c := d.containers.Get()
	c.Lock()
	d.updateHealthMonitorElseBranch(c)
	defer c.Unlock()
}

func (d *Daemon_moby28462) updateHealthMonitorIfBranch(c *Container_moby28462) {
	h := c.State.Health
	if stop := h.OpenMonitorChannel(); stop != nil {
		go monitor_moby28462(c, stop)
	}
}
func (d *Daemon_moby28462) updateHealthMonitorElseBranch(c *Container_moby28462) {
	h := c.State.Health
	h.CloseMonitorChannel()
}

type Health_moby28462 struct {
	stop chan struct{}
}

func (s *Health_moby28462) OpenMonitorChannel() chan struct{} {
	return s.stop
}

func (s *Health_moby28462) CloseMonitorChannel() {
	if s.stop != nil {
		s.stop <- struct{}{}
	}
}

func monitor_moby28462(c *Container_moby28462, stop chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
			handleProbeResult_moby28462(c)
		}
	}
}

func handleProbeResult_moby28462(c *Container_moby28462) {
	runtime.Gosched()
	c.Lock()
	defer c.Unlock()
}

func NewDaemonAndContainer_moby28462() (*Daemon_moby28462, *Container_moby28462) {
	c := &Container_moby28462{
		State: &State_moby28462{&Health_moby28462{make(chan struct{})}},
	}
	d := &Daemon_moby28462{Store_moby28462{c}}
	return d, c
}
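
/// One possible leaky interleaving (sketch):
///
/// G2 (StateChanged)                             G1 (monitor)
/// c.Lock()
/// CloseMonitorChannel: stop <- struct{}{}
/// [blocks: G1 is not receiving]
///                                               handleProbeResult: c.Lock() [blocks]
/// ------------------- G1 and G2 deadlock -------------------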
func Moby28462() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			d, c := NewDaemonAndContainer_moby28462()
			go monitor_moby28462(c, c.State.Health.OpenMonitorChannel()) // G1
			go d.StateChanged()                                          // G2
		}()
	}
}
|
67
src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go
vendored
Normal file
|
@ -0,0 +1,67 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"errors"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Moby30408", Moby30408)
}

type Manifest_moby30408 struct {
	Implements []string
}

type Plugin_moby30408 struct {
	activateWait *sync.Cond
	activateErr  error
	Manifest     *Manifest_moby30408
}

func (p *Plugin_moby30408) waitActive() error {
	p.activateWait.L.Lock()
	for !p.activated() {
		p.activateWait.Wait()
	}
	p.activateWait.L.Unlock()
	return p.activateErr
}

func (p *Plugin_moby30408) activated() bool {
	return p.Manifest != nil
}

func testActive_moby30408(p *Plugin_moby30408) {
	done := make(chan struct{})
	go func() { // G2
		p.waitActive()
		close(done)
	}()
	<-done
}
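
/// One possible leak (sketch): p.Manifest is never set, so activated() stays
/// false and G2 waits on the condition variable forever; nothing ever calls
/// Signal or Broadcast, so testActive (and with it G1) blocks on <-done.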
func Moby30408() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() { // G1
		p := &Plugin_moby30408{activateWait: sync.NewCond(&sync.Mutex{})}
		p.activateErr = errors.New("some junk happened")

		testActive_moby30408(p)
	}()
}
|
71
src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/33781
 * Buggy version: 33fd3817b0f5ca4b87f0a75c2bd583b4425d392b
 * fix commit-id: 67297ba0051d39be544009ba76abea14bc0be8a4
 * Flaky: 25/100
 */

package main

import (
	"context"
	"os"
	"runtime/pprof"
	"time"
)

func init() {
	register("Moby33781", Moby33781)
}

func monitor_moby33781(stop chan bool) {
	probeInterval := time.Millisecond
	probeTimeout := time.Millisecond
	for {
		select {
		case <-stop:
			return
		case <-time.After(probeInterval):
			results := make(chan bool)
			ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
			go func() { // G3
				results <- true
				close(results)
			}()
			select {
			case <-stop:
				// results should be drained here
				cancelProbe()
				return
			case <-results:
				cancelProbe()
			case <-ctx.Done():
				cancelProbe()
				<-results
			}
		}
	}
}
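
/// One possible leak (sketch): when monitor returns through the inner
/// case <-stop without draining results (see the comment above), the probe
/// goroutine G3 blocks forever sending on the unbuffered results channel.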
func Moby33781() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()
	for i := 0; i < 100; i++ {
		go func(i int) {
			stop := make(chan bool)
			go monitor_moby33781(stop) // G1
			go func() { // G2
				time.Sleep(time.Duration(i) * time.Millisecond)
				stop <- true
			}()
		}(i)
	}
}
|
53
src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/36114
 * Buggy version: 6d4d3c52ae7c3f910bfc7552a2a673a8338e5b9f
 * fix commit-id: a44fcd3d27c06aaa60d8d1cbce169f0d982e74b1
 * Flaky: 100/100
 */
package main

import (
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby36114", Moby36114)
}

type serviceVM_moby36114 struct {
	sync.Mutex
}

func (svm *serviceVM_moby36114) hotAddVHDsAtStart() {
	svm.Lock()
	defer svm.Unlock()
	svm.hotRemoveVHDsAtStart()
}

func (svm *serviceVM_moby36114) hotRemoveVHDsAtStart() {
	svm.Lock()
	defer svm.Unlock()
}
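
/// One possible leak (sketch): hotAddVHDsAtStart acquires svm.Mutex and then
/// calls hotRemoveVHDsAtStart, which tries to acquire the same non-reentrant
/// mutex again, deadlocking the goroutine with itself.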
func Moby36114() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 100; i++ {
		go func() {
			s := &serviceVM_moby36114{}
			go s.hotAddVHDsAtStart()
		}()
	}
}
|
101
src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go
vendored
Normal file
|
@ -0,0 +1,101 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/4951
 * Buggy version: 81f148be566ab2b17810ad4be61a5d8beac8330f
 * fix commit-id: 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3
 * Flaky: 100/100
 */
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby4951", Moby4951)
}

type DeviceSet_moby4951 struct {
	sync.Mutex
	infos            map[string]*DevInfo_moby4951
	nrDeletedDevices int
}

func (devices *DeviceSet_moby4951) DeleteDevice(hash string) {
	devices.Lock()
	defer devices.Unlock()

	info := devices.lookupDevice(hash)

	info.lock.Lock()
	runtime.Gosched()
	defer info.lock.Unlock()

	devices.deleteDevice(info)
}

func (devices *DeviceSet_moby4951) lookupDevice(hash string) *DevInfo_moby4951 {
	existing, ok := devices.infos[hash]
	if !ok {
		return nil
	}
	return existing
}

func (devices *DeviceSet_moby4951) deleteDevice(info *DevInfo_moby4951) {
	devices.removeDeviceAndWait(info.Name())
}

func (devices *DeviceSet_moby4951) removeDeviceAndWait(devname string) {
	/// remove devices by devname
	devices.Unlock()
	time.Sleep(300 * time.Nanosecond)
	devices.Lock()
}

type DevInfo_moby4951 struct {
	lock sync.Mutex
	name string
}

func (info *DevInfo_moby4951) Name() string {
	return info.name
}

func NewDeviceSet_moby4951() *DeviceSet_moby4951 {
	devices := &DeviceSet_moby4951{
		infos: make(map[string]*DevInfo_moby4951),
	}
	info1 := &DevInfo_moby4951{
		name: "info1",
	}
	info2 := &DevInfo_moby4951{
		name: "info2",
	}
	devices.infos[info1.name] = info1
	devices.infos[info2.name] = info2
	return devices
}
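
/// One possible leaky interleaving (sketch; both goroutines delete "info1"):
///
/// G1 (DeleteDevice)                          G2 (DeleteDevice)
/// devices.Lock(); info.lock.Lock()
/// removeDeviceAndWait: devices.Unlock()
///                                            devices.Lock()
///                                            info.lock.Lock() [blocks]
/// devices.Lock() [blocks]
/// ------------------- G1 and G2 deadlock -------------------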
func Moby4951() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() {
		ds := NewDeviceSet_moby4951()
		/// Delete devices by the same info
		go ds.DeleteDevice("info1")
		go ds.DeleteDevice("info1")
	}()
}
|
55
src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

/*
 * Project: moby
 * Issue or PR : https://github.com/moby/moby/pull/7559
 * Buggy version: 64579f51fcb439c36377c0068ccc9a007b368b5a
 * fix commit-id: 6cbb8e070d6c3a66bf48fbe5cbf689557eee23db
 * Flaky: 100/100
 */
package main

import (
	"net"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Moby7559", Moby7559)
}

type UDPProxy_moby7559 struct {
	connTrackLock sync.Mutex
}

func (proxy *UDPProxy_moby7559) Run() {
	for i := 0; i < 2; i++ {
		proxy.connTrackLock.Lock()
		_, err := net.DialUDP("udp", nil, nil)
		if err != nil {
			continue
			/// Missing unlock here
		}
		if i == 0 {
			break
		}
	}
	proxy.connTrackLock.Unlock()
}
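
/// One possible leak (sketch): net.DialUDP with a nil remote address fails,
/// so the first iteration hits continue while still holding connTrackLock
/// (the missing unlock above); the second iteration's Lock then deadlocks
/// the goroutine with itself.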
func Moby7559() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 20; i++ {
		go (&UDPProxy_moby7559{}).Run()
	}
}
|
122
src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go
vendored
Normal file
|
@ -0,0 +1,122 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func init() {
	register("Serving2137", Serving2137)
}

type token_serving2137 struct{}

type request_serving2137 struct {
	lock     *sync.Mutex
	accepted chan bool
}

type Breaker_serving2137 struct {
	pendingRequests chan token_serving2137
	activeRequests  chan token_serving2137
}

func (b *Breaker_serving2137) Maybe(thunk func()) bool {
	var t token_serving2137
	select {
	default:
		// Pending request queue is full. Report failure.
		return false
	case b.pendingRequests <- t:
		// Pending request has capacity.
		// Wait for capacity in the active queue.
		b.activeRequests <- t
		// Defer releasing capacity in the active and pending request queue.
		defer func() {
			<-b.activeRequests
			runtime.Gosched()
			<-b.pendingRequests
		}()
		// Do the thing.
		thunk()
		// Report success
		return true
	}
}

func (b *Breaker_serving2137) concurrentRequest() request_serving2137 {
	r := request_serving2137{lock: &sync.Mutex{}, accepted: make(chan bool, 1)}
	r.lock.Lock()
	var start sync.WaitGroup
	start.Add(1)
	go func() { // G2, G3
		start.Done()
		runtime.Gosched()
		ok := b.Maybe(func() {
			// Will block on locked mutex.
			r.lock.Lock()
			runtime.Gosched()
			r.lock.Unlock()
		})
		r.accepted <- ok
	}()
	start.Wait() // Ensure that the go func has had a chance to execute.
	return r
}

// Perform n requests against the breaker, returning mutexes for each
// request which succeeded, and a slice of bools for all requests.
func (b *Breaker_serving2137) concurrentRequests(n int) []request_serving2137 {
	requests := make([]request_serving2137, n)
	for i := range requests {
		requests[i] = b.concurrentRequest()
	}
	return requests
}

func NewBreaker_serving2137(queueDepth, maxConcurrency int32) *Breaker_serving2137 {
	return &Breaker_serving2137{
		pendingRequests: make(chan token_serving2137, queueDepth+maxConcurrency),
		activeRequests:  make(chan token_serving2137, maxConcurrency),
	}
}

func unlock_serving2137(req request_serving2137) {
	req.lock.Unlock()
	runtime.Gosched()
	// Verify that function has completed
	ok := <-req.accepted
	runtime.Gosched()
	// Requeue for next usage
	req.accepted <- ok
}

func unlockAll_serving2137(requests []request_serving2137) {
	for _, lc := range requests {
		unlock_serving2137(lc)
	}
}
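
/// One possible leaky interleaving (sketch; r1 and r2 are the two requests,
/// against a breaker with a single active slot):
///
/// G2 (r1's Maybe)               G3 (r2's Maybe)             G1 (unlockAll)
///                               activeRequests <- t
///                               thunk: r2.lock.Lock() [blocks]
/// activeRequests <- t [blocks]
///                                                           unlock(r1): <-r1.accepted [blocks]
/// --- r2.lock is only unlocked after r1.accepted arrives: G1, G2, G3 leak ---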
func Serving2137() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(100 * time.Millisecond)
		prof.WriteTo(os.Stdout, 2)
	}()

	for i := 0; i < 1000; i++ {
		go func() {
			b := NewBreaker_serving2137(1, 1)

			locks := b.concurrentRequests(2) // G1
			unlockAll_serving2137(locks)
		}()
	}
}
|
87
src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go
vendored
Normal file
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Syncthing4829", Syncthing4829)
}

type Address_syncthing4829 int

type Mapping_syncthing4829 struct {
	mut sync.RWMutex // L2

	extAddresses map[string]Address_syncthing4829
}

func (m *Mapping_syncthing4829) clearAddresses() {
	m.mut.Lock() // L2
	var removed []Address_syncthing4829
	for id, addr := range m.extAddresses {
		removed = append(removed, addr)
		delete(m.extAddresses, id)
	}
	if len(removed) > 0 {
		m.notify(nil, removed)
	}
	m.mut.Unlock() // L2
}

func (m *Mapping_syncthing4829) notify(added, remove []Address_syncthing4829) {
	m.mut.RLock()   // L2
	m.mut.RUnlock() // L2
}

type Service_syncthing4829 struct {
	mut sync.RWMutex // L1

	mappings []*Mapping_syncthing4829
}

func (s *Service_syncthing4829) NewMapping() *Mapping_syncthing4829 {
	mapping := &Mapping_syncthing4829{
		extAddresses: make(map[string]Address_syncthing4829),
	}
	s.mut.Lock() // L1
	s.mappings = append(s.mappings, mapping)
	s.mut.Unlock() // L1
	return mapping
}

func (s *Service_syncthing4829) RemoveMapping(mapping *Mapping_syncthing4829) {
	s.mut.Lock()         // L1
	defer s.mut.Unlock() // L1
	for _, existing := range s.mappings {
		if existing == mapping {
			mapping.clearAddresses()
		}
	}
}
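
/// One possible leak (sketch): clearAddresses write-locks m.mut (L2) and then
/// calls notify, which tries to read-lock the same RWMutex; the goroutine
/// deadlocks with itself while also holding the service lock L1.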
func Syncthing4829() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		// Yield several times to allow the child goroutine to run.
		for i := 0; i < yieldCount; i++ {
			runtime.Gosched()
		}
		prof.WriteTo(os.Stdout, 2)
	}()

	go func() { // G1
		natSvc := &Service_syncthing4829{}
		m := natSvc.NewMapping()
		m.extAddresses["test"] = 0

		natSvc.RemoveMapping(m)
	}()
}
|
103
src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func init() {
	register("Syncthing5795", Syncthing5795)
}

type message_syncthing5795 interface{}

type ClusterConfig_syncthing5795 struct{}

type Model_syncthing5795 interface {
	ClusterConfig(message_syncthing5795)
}

type TestModel_syncthing5795 struct {
	ccFn func()
}

func (t *TestModel_syncthing5795) ClusterConfig(msg message_syncthing5795) {
	if t.ccFn != nil {
		t.ccFn()
	}
}

type Connection_syncthing5795 interface {
	Start()
	Close()
}

type rawConnection_syncthing5795 struct {
	receiver Model_syncthing5795

	inbox                 chan message_syncthing5795
	dispatcherLoopStopped chan struct{}
	closed                chan struct{}
	closeOnce             sync.Once
}

func (c *rawConnection_syncthing5795) Start() {
	go c.dispatcherLoop() // G2
}

func (c *rawConnection_syncthing5795) dispatcherLoop() {
	defer close(c.dispatcherLoopStopped)
	var msg message_syncthing5795
	for {
		select {
		case msg = <-c.inbox:
		case <-c.closed:
			return
		}
		switch msg := msg.(type) {
		case *ClusterConfig_syncthing5795:
			c.receiver.ClusterConfig(msg)
		default:
			return
		}
	}
}

func (c *rawConnection_syncthing5795) Close() {
	c.closeOnce.Do(func() {
		close(c.closed)
		<-c.dispatcherLoopStopped
	})
}
func Syncthing5795() {
|
||||
prof := pprof.Lookup("goroutineleak")
|
||||
defer func() {
|
||||
// Yield several times to allow the child goroutine to run.
|
||||
for i := 0; i < yieldCount; i++ {
|
||||
runtime.Gosched()
|
||||
}
|
||||
prof.WriteTo(os.Stdout, 2)
|
||||
}()
|
||||
go func() { // G1
|
||||
m := &TestModel_syncthing5795{}
|
||||
c := &rawConnection_syncthing5795{
|
||||
dispatcherLoopStopped: make(chan struct{}),
|
||||
closed: make(chan struct{}),
|
||||
inbox: make(chan message_syncthing5795),
|
||||
receiver: m,
|
||||
}
|
||||
m.ccFn = c.Close
|
||||
|
||||
c.Start()
|
||||
c.inbox <- &ClusterConfig_syncthing5795{}
|
||||
|
||||
<-c.dispatcherLoopStopped
|
||||
}()
|
||||
}
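The deadlock cycle: the ClusterConfig callback (m.ccFn) invokes c.Close from inside dispatcherLoop (G2); Close then blocks on <-c.dispatcherLoopStopped, which only closes when dispatcherLoop returns, so G2 waits on itself, and G1's final receive leaks too. A sketch of one conventional fix direction (hypothetical types, not the upstream Syncthing patch): make Close signal-only and synchronize outside the dispatcher.

package main

import (
	"fmt"
	"sync"
)

type conn struct {
	inbox   chan int
	closed  chan struct{}
	stopped chan struct{}
	once    sync.Once
}

func (c *conn) loop() {
	defer close(c.stopped)
	for {
		select {
		case v := <-c.inbox:
			if v < 0 {
				c.Close() // safe now: Close never waits for the loop itself
			}
		case <-c.closed:
			return
		}
	}
}

// Close only signals shutdown, so it may be called from any goroutine,
// including the dispatcher loop.
func (c *conn) Close() {
	c.once.Do(func() { close(c.closed) })
}

func main() {
	c := &conn{
		inbox:   make(chan int),
		closed:  make(chan struct{}),
		stopped: make(chan struct{}),
	}
	go c.loop()
	c.inbox <- -1 // triggers Close from inside the dispatcher
	<-c.stopped   // callers that need to wait do so here, outside Close
	fmt.Println("dispatcher stopped cleanly")
}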

39 src/runtime/testdata/testgoroutineleakprofile/main.go (vendored, new file)
@@ -0,0 +1,39 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "os"

// The number of times the main (profiling) goroutine should yield
// in order to allow the leaking goroutines to get stuck.
const yieldCount = 10

var cmds = map[string]func(){}

func register(name string, f func()) {
	if cmds[name] != nil {
		panic("duplicate registration: " + name)
	}
	cmds[name] = f
}

func registerInit(name string, f func()) {
	if len(os.Args) >= 2 && os.Args[1] == name {
		f()
	}
}

func main() {
	if len(os.Args) < 2 {
		println("usage: " + os.Args[0] + " name-of-test")
		return
	}
	f := cmds[os.Args[1]]
	if f == nil {
		println("unknown function: " + os.Args[1])
		return
	}
	f()
}
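The driver above runs exactly one registered test per process, selected by name on the command line. A hypothetical invocation from a parent process (names and paths are illustrative, not the runtime test suite's actual harness):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Assumes the testdata program was built beforehand, e.g.:
	//   GOEXPERIMENT=goroutineleakprofile go build -o testgoroutineleakprofile .
	out, err := exec.Command("./testgoroutineleakprofile", "NilRecv").CombinedOutput()
	if err != nil {
		fmt.Println("run failed:", err)
	}
	fmt.Printf("%s", out) // the goroutineleak profile written by the child
}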

253 src/runtime/testdata/testgoroutineleakprofile/simple.go (vendored, new file)
@@ -0,0 +1,253 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

// This is a set of micro-tests with obvious goroutine leaks that
// ensures goroutine leak detection works.
//
// Tests in this file are deterministic only when run with GOMAXPROCS=1.
// The main goroutine forcefully yields via `runtime.Gosched()` before
// running the profiler. This moves it to the back of the run queue,
// allowing the leaky goroutines to be scheduled beforehand and get stuck.

func init() {
	register("NilRecv", NilRecv)
	register("NilSend", NilSend)
	register("SelectNoCases", SelectNoCases)
	register("ChanRecv", ChanRecv)
	register("ChanSend", ChanSend)
	register("Select", Select)
	register("WaitGroup", WaitGroup)
	register("MutexStack", MutexStack)
	register("MutexHeap", MutexHeap)
	register("RWMutexRLock", RWMutexRLock)
	register("RWMutexLock", RWMutexLock)
	register("Cond", Cond)
	register("Mixed", Mixed)
	register("NoLeakGlobal", NoLeakGlobal)
}

func NilRecv() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		var c chan int
		<-c
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func NilSend() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		var c chan int
		c <- 0
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func ChanRecv() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		<-make(chan int)
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func SelectNoCases() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		select {}
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func ChanSend() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		make(chan int) <- 0
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func Select() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		select {
		case make(chan int) <- 0:
		case <-make(chan int):
		}
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func WaitGroup() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		var wg sync.WaitGroup
		wg.Add(1)
		wg.Wait()
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func MutexStack() {
	prof := pprof.Lookup("goroutineleak")
	for i := 0; i < 1000; i++ {
		go func() {
			var mu sync.Mutex
			mu.Lock()
			mu.Lock()
			panic("should not be reached")
		}()
	}
	// Yield several times to allow the child goroutines to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func MutexHeap() {
	prof := pprof.Lookup("goroutineleak")
	for i := 0; i < 1000; i++ {
		go func() {
			mu := &sync.Mutex{}
			go func() {
				mu.Lock()
				mu.Lock()
				panic("should not be reached")
			}()
		}()
	}
	// Yield several times to allow the child goroutines to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func RWMutexRLock() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		mu := &sync.RWMutex{}
		mu.Lock()
		mu.RLock()
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func RWMutexLock() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		mu := &sync.RWMutex{}
		mu.Lock()
		mu.Lock()
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func Cond() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		cond := sync.NewCond(&sync.Mutex{})
		cond.L.Lock()
		cond.Wait()
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

func Mixed() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		ch := make(chan int)
		wg := sync.WaitGroup{}
		wg.Add(1)
		go func() {
			ch <- 0
			wg.Done()
			panic("should not be reached")
		}()
		wg.Wait()
		<-ch
		panic("should not be reached")
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}

var ch = make(chan int)

// No leak should be reported by this test.
func NoLeakGlobal() {
	prof := pprof.Lookup("goroutineleak")
	go func() {
		<-ch
	}()
	// Yield several times to allow the child goroutine to run.
	for i := 0; i < yieldCount; i++ {
		runtime.Gosched()
	}
	prof.WriteTo(os.Stdout, 2)
}
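NoLeakGlobal is the negative control: the blocked receive stays reachable through the global ch, so a send could still unblock it and it must not be reported. Detection is reachability-based (this commit wires it into the garbage collector), which the following hedged sketch illustrates; under the stated assumption that the goroutineleakprofile experiment is enabled, only the second goroutine should appear in the profile.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

var global = make(chan int)

func main() {
	prof := pprof.Lookup("goroutineleak")
	go func() { <-global }()         // not a leak: main can still send on global
	go func() { <-make(chan int) }() // leak: the channel is unreachable elsewhere
	for i := 0; i < 10; i++ {
		runtime.Gosched()
	}
	if prof != nil { // nil unless GOEXPERIMENT=goroutineleakprofile is set
		prof.WriteTo(os.Stdout, 2)
	}
}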

89 src/runtime/testdata/testgoroutineleakprofile/stresstests.go (vendored, new file)
@@ -0,0 +1,89 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"io"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

const spawnGCMaxDepth = 5

func init() {
	register("SpawnGC", SpawnGC)
	register("DaisyChain", DaisyChain)
}

func spawnGC(i int) {
	prof := pprof.Lookup("goroutineleak")
	if i == 0 {
		return
	}
	wg := &sync.WaitGroup{}
	wg.Add(i + 1)
	go func() {
		wg.Done()
		<-make(chan int)
	}()
	for j := 0; j < i; j++ {
		go func() {
			wg.Done()
			spawnGC(i - 1)
		}()
	}
	wg.Wait()
	runtime.Gosched()
	if i == spawnGCMaxDepth {
		prof.WriteTo(os.Stdout, 2)
	} else {
		// We want to concurrently trigger the profile in order to concurrently run
		// the GC, but we don't want to stream all the profiles to standard output.
		//
		// Only output the profile for the root call to spawnGC, and otherwise stream
		// the profile outputs to /dev/null to avoid jumbling.
		prof.WriteTo(io.Discard, 2)
	}
}

// SpawnGC spawns a tree of goroutine leaks and calls the goroutine leak profiler
// for each node in the tree. It is supposed to stress the goroutine leak profiler
// under a heavily concurrent workload.
func SpawnGC() {
	spawnGC(spawnGCMaxDepth)
}

// DaisyChain spawns a daisy chain of runnable goroutines.
//
// Each goroutine in the chain creates a new channel and goroutine.
//
// This illustrates a pathological worst case for the goroutine leak GC complexity,
// as opposed to the regular GC, which is not negatively affected by this pattern.
func DaisyChain() {
	prof := pprof.Lookup("goroutineleak")
	defer func() {
		time.Sleep(time.Second)
		prof.WriteTo(os.Stdout, 2)
	}()
	var chain func(i int, ch chan struct{})
	chain = func(i int, ch chan struct{}) {
		if i <= 0 {
			go func() {
				time.Sleep(time.Hour)
				ch <- struct{}{}
			}()
			return
		}
		ch2 := make(chan struct{})
		go chain(i-1, ch2)
		<-ch2
		ch <- struct{}{}
	}
	// The channel buffer avoids goroutine leaks.
	go chain(1000, make(chan struct{}, 1))
}
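The closing comment is the key detail: DaisyChain stresses the detector with a long chain of runnable goroutines rather than leaking them, and the single-slot buffer is what lets the final send complete without a receiver. A minimal sketch of that property:

package main

import "fmt"

func main() {
	done := make(chan struct{}, 1)
	go func() {
		done <- struct{}{} // buffered send succeeds even if nobody receives
	}()
	// With an unbuffered channel, the goroutine above would block forever
	// once main returns without receiving.
	fmt.Println("a buffered sender cannot leak while the buffer has capacity")
}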

@@ -1206,6 +1206,7 @@ var gStatusStrings = [...]string{
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
+	_Gleaked:    "leaked",
	_Gpreempted: "preempted",
}
@@ -1226,7 +1227,7 @@ func goroutineheader(gp *g) {
	}

	// Override.
-	if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
+	if (gpstatus == _Gwaiting || gpstatus == _Gleaked) && gp.waitreason != waitReasonZero {
		status = gp.waitreason.String()
	}
@@ -1245,6 +1246,9 @@
		}
	}
	print(" [", status)
+	if gpstatus == _Gleaked {
+		print(" (leaked)")
+	}
	if isScan {
		print(" (scan)")
	}
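Together, these traceback hunks preserve a leaked goroutine's wait reason and tag its header, so a debug=2 profile entry would begin with a header of the illustrative form `goroutine 7 [chan receive (leaked)]:` (the goroutine ID and wait reason vary by program; the exact rendering follows from goroutineheader above).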

@@ -122,7 +122,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus {
		tgs = tracev2.GoRunning
	case _Gsyscall:
		tgs = tracev2.GoSyscall
-	case _Gwaiting, _Gpreempted:
+	case _Gwaiting, _Gpreempted, _Gleaked:
		// There are a number of cases where a G might end up in
		// _Gwaiting but it's actually running in a non-preemptive
		// state but needs to present itself as preempted to the