// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !nacl,!js

package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/testenv"
	"io"
	"io/ioutil"
	"math/big"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"runtime/pprof/internal/profile"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	accum := *y
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		accum = f(accum)
	}
	*y = accum
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

func cpuHog2(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	return foo
}

func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}

func TestCPUProfileInlining(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})
}

func inlinedCaller(x int) int {
	x = inlinedCallee(x)
	return x
}

func inlinedCallee(x int) int {
	// We could just use cpuHog1, but for loops prevent inlining
	// right now. :(
	foo := x
	i := 0
loop:
	if foo > 0 {
		foo *= foo
	} else {
		foo *= foo + 1
	}
	if i++; i < 1e5 {
		goto loop
	}
	return foo
}
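
// parseProfile parses the CPU profile data in valBytes and calls f once per
// sample with the sample's count, call stack, and labels.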
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
}

func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	const maxDuration = 5 * time.Second
	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 200 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if profileOk(t, need, prof, duration) {
			return
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	switch runtime.GOOS {
	case "darwin", "dragonfly", "netbsd", "solaris":
		t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator;
	// QEMU is not perfect at emulating everything.
	// The IN_QEMU environment variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
}
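
// contains reports whether slice contains the string s.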
func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
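
// profileOk checks that prof is a well-formed CPU profile containing a
// plausible number of samples for duration and that it has samples in every
// function named in need. It reports whether those checks passed.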
func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
	ok = true

	// Check that profile is well formed and contains need.
	have := make([]uintptr, len(need))
	var samples uintptr
	var buf bytes.Buffer
	parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		for i, name := range need {
			if semi := strings.Index(name, ";"); semi > -1 {
				kv := strings.SplitN(name[semi+1:], "=", 2)
				if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
					continue
				}
				name = name[:semi]
			}
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						have[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if len(need) == 0 {
		return ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095). So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return ok
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
		// The special entry
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}
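
// fprintStack writes a single-line rendering of the stack stk to w: each
// location's address followed by its function names and line numbers.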
func fprintStack(w io.Writer, stk []*profile.Location) {
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
	fmt.Fprintf(w, "\n")
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func()
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})
}
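
// stacks returns the symbolized call stack (a slice of function names) for
// every sample in p.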
func stacks(p *profile.Profile) (res [][]string) {
	for _, s := range p.Sample {
		var stk []string
		for _, l := range s.Location {
			for _, line := range l.Line {
				stk = append(stk, line.Function.Name)
			}
		}
		res = append(res, stk)
	}
	return res
}
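
// containsStack reports whether any stack in got starts with the sequence of
// function names in want.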
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		for i, f := range want {
			if f != stk[i] {
				break
			}
			if i == len(want)-1 {
				return true
			}
		}
	}
	return false
}
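
// blockDelay is roughly how long each of the block helpers below stays
// blocked before being released.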
const blockDelay = 10 * time.Millisecond

func blockChanRecv() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	<-c
}

func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}

func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync() {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			time.Sleep(blockDelay)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}

func TestMutexProfile(t *testing.T) {
	// Generate mutex profile

	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	blockMutex()

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		if len(lines) != 6 {
			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		if len(lines) < 6 {
			return
		}
		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
		r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		r3 := "^#.*runtime/pprof.blockMutex.*$"
		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[5], r3)
		}
		t.Logf(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, want := range [][]string{
			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}
	})
}
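
// func1 through func4 are identical blocking functions with distinct names,
// so goroutines parked in them show up with distinguishable stacks in the
// goroutine profile checked by TestGoroutineCounts.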
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
		t.Errorf("expected sorted goroutine counts:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	if !containsCounts(p, []int64{50, 40, 10, 1}) {
		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
			[]int64{50, 40, 10, 1}, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}
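
// containsInOrder reports whether the strings in all appear in s in the given
// order, each match starting after the end of the previous one.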
func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		i := strings.Index(s, t)
		if i < 0 {
			return false
		}
		s = s[i+len(t):]
	}
	return true
}
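
// containsCounts reports whether, for each count in counts, prof contains a
// sample whose single value is that count.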
func containsCounts(prof *profile.Profile, counts []int64) bool {
	m := make(map[int64]int)
	for _, c := range counts {
		m[c]++
	}
	for _, s := range prof.Sample {
		// The count is the single value in the sample
		if len(s.Value) != 1 {
			return false
		}
		m[s.Value[0]]--
	}
	for _, n := range m {
		if n > 0 {
			return false
		}
	}
	return true
}

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
	t.Parallel()
	var buf bytes.Buffer
	p := NewProfile("test18836")
	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := "test18836 profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}

func TestCPUProfileLabel(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
	})
}

func TestLabelRace(t *testing.T) {
	// Test the race detector annotations for synchronization
	// between setting labels and consuming them from the
	// profile.
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		for time.Since(start) < dur {
			var salts [10]int
			for i := 0; i < 10; i++ {
				wg.Add(1)
				go func(j int) {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
					})
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
	})
}

// Check that there is no deadlock when the program receives SIGPROF while in
// the critical section of a 64-bit atomic operation. Used to happen on
// mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
	f, err := ioutil.TempFile("", "profatomic")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	var flag uint64
	done := make(chan bool, 1)

	go func() {
		for atomic.LoadUint64(&flag) == 0 {
			runtime.Gosched()
		}
		done <- true
	}()
	time.Sleep(50 * time.Millisecond)
	atomic.StoreUint64(&flag, 1)
	<-done
}