runtime: prevent sigprof during all stack barrier ops

A sigprof during stack barrier insertion or removal can crash if it
detects an inconsistency between the stkbar array and the stack
itself. Currently we protect against this when scanning another G's
stack using stackLock, but we don't protect against it when unwinding
stack barriers for a recover or a memmove to the stack.

This commit cleans up and improves the stack locking code. It
abstracts out the lock and unlock operations. It uses the lock
consistently everywhere we perform stack operations, and pushes the
lock/unlock down closer to where the stack barrier operations happen
to make it more obvious what it's protecting. Finally, it modifies
sigprof so that instead of spinning until it acquires the lock, it
simply doesn't perform a traceback if it can't acquire it. This is
necessary to prevent self-deadlock.

Updates #11863, which introduced stackLock to fix some of these
issues, but didn't go far enough.

Updates #12528.

Change-Id: I9d1fa88ae3744d31ba91500c96c6988ce1a3a349
Reviewed-on: https://go-review.googlesource.com/17036
Reviewed-by: Russ Cox <rsc@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
This commit is contained in:
Austin Clements 2015-11-18 14:10:40 -05:00
parent 3a2fc06833
commit 9c9d74aba7
3 changed files with 47 additions and 12 deletions

View file

@@ -735,13 +735,7 @@ func scang(gp *g) {
// the goroutine until we're done.
if castogscanstatus(gp, s, s|_Gscan) {
if !gp.gcscandone {
// Coordinate with traceback
// in sigprof.
for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
scanstack(gp)
atomic.Store(&gp.stackLock, 0)
gp.gcscandone = true
}
restartg(gp)
@@ -2846,11 +2840,6 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// Profiling runs concurrently with GC, so it must not allocate.
mp.mallocing++
// Coordinate with stack barrier insertion in scanstack.
for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
// Define that a "user g" is a user-created goroutine, and a "system g"
// is one that is m->g0 or m->gsignal.
//
@@ -2917,8 +2906,18 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// transition. We simply require that g and SP match and that the PC is not
// in gogo.
traceback := true
haveStackLock := false
if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
traceback = false
} else if gp.m.curg != nil {
if gcTryLockStackBarriers(gp.m.curg) {
haveStackLock = true
} else {
// Stack barriers are being inserted or
// removed, so we can't get a consistent
// traceback right now.
traceback = false
}
}
var stk [maxCPUProfStack]uintptr
n := 0
@@ -2961,7 +2960,9 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
}
}
}
atomic.Store(&gp.stackLock, 0)
if haveStackLock {
gcUnlockStackBarriers(gp.m.curg)
}
if prof.hz != 0 {
// Simple cas-lock to coordinate with setcpuprofilerate.