runtime: fix false positive race in profile label reading
Because profile labels are copied from the goroutine into the tag buffer by the signal handler, there's a carefully-crafted set of race detector annotations to create the necessary happens-before edges between setting a goroutine's profile label and retrieving it from the profile tag buffer.

Given the constraints of the signal handler, we have to approximate the true synchronization behavior. Currently, that approximation is too weak.

Ideally, runtime_setProfLabel would perform a store-release on &getg().labels and copying each label into the profile would perform a load-acquire on &getg().labels. This would create the necessary happens-before edges through each individual g.labels object. Since we can't do this in the signal handler, we instead synchronize on a "labelSync" global.

The problem occurs with the following sequence:

1. Goroutine 1 calls setProfLabel, which does a store-release on labelSync.
2. Goroutine 2 calls setProfLabel, which does a store-release on labelSync.
3. Goroutine 3 reads the profile, which does a load-acquire on labelSync.

The problem is that the load-acquire only synchronizes with the *most recent* store-release to labelSync, and the two store-releases don't synchronize with each other. So, once goroutine 3 touches the label set by goroutine 1, we report a race.

The solution is to use racereleasemerge. This is like a read-modify-write, rather than just a store-release. Each RMW of labelSync in runtime_setProfLabel synchronizes with the previous RMW of labelSync, and this ultimately carries forward to the load-acquire, so it synchronizes with *all* setProfLabel operations, not just the most recent.

Change-Id: Iab58329b156122002fff12cfe64fbeacb31c9613
Reviewed-on: https://go-review.googlesource.com/56670
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
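The runtime side of the fix is not rendered on this page (only the test file's hunks appear below). As a sketch only, reconstructed from the description above, the synchronization in runtime_setProfLabel now looks roughly like this; getg, raceenabled, racereleasemerge, and labelSync are the runtime internals the message refers to, and the exact comments and layout here are not the verbatim diff:

// Sketch reconstructed from the commit message, not the verbatim
// runtime change. labelSync is the global the message describes.
var labelSync uintptr

//go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
func runtime_setProfLabel(labels unsafe.Pointer) {
	// Ideally this would be a store-release on &getg().labels, matched
	// by a load-acquire where the profile reader consumes the copied
	// labels. The copy happens in a signal handler, which cannot call
	// into the race runtime, so synchronize on the labelSync global
	// instead.
	//
	// racereleasemerge acts like a read-modify-write rather than a
	// plain store-release: each call synchronizes with the previous
	// one, so the profile reader's eventual load-acquire picks up a
	// happens-before edge from every setProfLabel call, not just the
	// most recent one.
	if raceenabled {
		racereleasemerge(unsafe.Pointer(&labelSync))
	}
	getg().labels = labels
}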
parent ac29f4d01c
commit 57584a0ee1
3 changed files with 46 additions and 8 deletions
runtime/pprof/pprof_test.go
@@ -26,7 +26,7 @@ import (
 	"time"
 )
 
-func cpuHogger(f func(), dur time.Duration) {
+func cpuHogger(f func() int, dur time.Duration) {
 	// We only need to get one 100 Hz clock tick, so we've got
 	// a large safety buffer.
 	// But do at least 500 iterations (which should take about 100ms),
@@ -46,7 +46,7 @@ var (
 // The actual CPU hogging function.
 // Must not call other functions nor access heap/globals in the loop,
 // otherwise under race detector the samples will be in the race runtime.
-func cpuHog1() {
+func cpuHog1() int {
 	foo := salt1
 	for i := 0; i < 1e5; i++ {
 		if foo > 0 {
@@ -55,10 +55,10 @@ func cpuHog1() {
 			foo *= foo + 1
 		}
 	}
-	salt1 = foo
+	return foo
 }
 
-func cpuHog2() {
+func cpuHog2() int {
 	foo := salt2
 	for i := 0; i < 1e5; i++ {
 		if foo > 0 {
@@ -67,7 +67,7 @@ func cpuHog2() {
 			foo *= foo + 2
 		}
 	}
-	salt2 = foo
+	return foo
 }
 
 func TestCPUProfile(t *testing.T) {
@@ -95,8 +95,9 @@ func TestCPUProfileInlining(t *testing.T) {
 	})
 }
 
-func inlinedCaller() {
+func inlinedCaller() int {
 	inlinedCallee()
+	return 0
 }
 
 func inlinedCallee() {
@@ -716,6 +717,28 @@ func TestCPUProfileLabel(t *testing.T) {
 	})
 }
 
+func TestLabelRace(t *testing.T) {
+	// Test the race detector annotations for synchronization
+	// between setting labels and consuming them from the
+	// profile.
+	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
+		start := time.Now()
+		var wg sync.WaitGroup
+		for time.Since(start) < dur {
+			for i := 0; i < 10; i++ {
+				wg.Add(1)
+				go func() {
+					Do(context.Background(), Labels("key", "value"), func(context.Context) {
+						cpuHogger(cpuHog1, time.Millisecond)
+					})
+					wg.Done()
+				}()
+			}
+			wg.Wait()
+		}
+	})
+}
+
 // Check that there is no deadlock when the program receives SIGPROF while in
 // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
 func TestAtomicLoadStore64(t *testing.T) {
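For context outside the test harness, the three-goroutine sequence from the commit message can be sketched with the public runtime/pprof API. Everything below is illustrative (busyLoop is a made-up helper), not code from this commit:

package main

import (
	"bytes"
	"context"
	"runtime/pprof"
	"sync"
	"time"
)

// busyLoop spins so the SIGPROF-driven profiler has labeled samples
// to copy into the profile's tag buffer.
func busyLoop(d time.Duration) {
	for start := time.Now(); time.Since(start) < d; {
	}
}

func main() {
	// Goroutine 3's role: collecting the profile performs the
	// load-acquire on labelSync.
	var buf bytes.Buffer
	if err := pprof.StartCPUProfile(&buf); err != nil {
		panic(err)
	}

	// Goroutines 1 and 2: each label update (pprof.Do sets the
	// goroutine's profile labels) performs the release annotation.
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			pprof.Do(context.Background(), pprof.Labels("key", "value"), func(context.Context) {
				busyLoop(50 * time.Millisecond)
			})
		}()
	}
	wg.Wait()
	pprof.StopCPUProfile()
}

Before this change, running such a program under the race detector could report a spurious race once the profile reader touched labels set by the first goroutine; the new test above exercises the same pattern via go test -race -run TestLabelRace runtime/pprof.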