runtime: fix a few GC-related bugs
1) Move non-preemption check even earlier in newstack.
   This avoids a few priority inversion problems.

2) Always use atomic operations to update bitmap for 1-word objects.
   This avoids lost mark bits during concurrent GC.

3) Stop using work.nproc == 1 as a signal for being single-threaded.
   The concurrent GC runs with work.nproc == 1 but other procs are
   running mutator code. The use of work.nproc == 1 in getfull *is*
   safe, but remove it anyway, since it is saving only a single atomic
   operation per GC round.

Fixes #9225.

Change-Id: I24134f100ad592ea8cb59efb6a54f5a1311093dc
Reviewed-on: https://go-review.googlesource.com/2745
Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
parent b8d67596f6
commit aae0f074c0

3 changed files with 42 additions and 36 deletions
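Fix 2 hinges on having an atomic OR over a single GC-bitmap byte, which is what the runtime-internal atomicor8 used in the hunks below provides. As a rough illustration of the same technique in portable Go, here is a sketch of an atomic OR built from a compare-and-swap loop; atomicOr32 and the 4-bits-per-object layout are illustrative assumptions, not the runtime's code:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// atomicOr32 ORs mask into *addr using a compare-and-swap loop, the
	// classic way to build an atomic OR out of CAS. The runtime's
	// atomicor8 applies the same idea at byte granularity.
	func atomicOr32(addr *uint32, mask uint32) {
		for {
			old := atomic.LoadUint32(addr)
			if atomic.CompareAndSwapUint32(addr, old, old|mask) {
				return
			}
		}
	}

	func main() {
		// One word of a GC-style bitmap: 4 bits of metadata per object,
		// so 8 small objects share this word and race to set their bits.
		var bitmap uint32
		var wg sync.WaitGroup
		for i := 0; i < 8; i++ {
			mask := uint32(1) << (4 * i) // "marked" bit for object i
			wg.Add(1)
			go func() {
				defer wg.Done()
				// A plain bitmap |= mask here could lose a concurrent
				// goroutine's bit; the CAS loop cannot.
				atomicOr32(&bitmap, mask)
			}()
		}
		wg.Wait()
		fmt.Printf("bitmap = %#08x\n", bitmap) // always 0x11111111
	}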
@@ -249,7 +249,10 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	var ptrmask *uint8
 	if size == ptrSize {
 		// It's one word and it has pointers, it must be a pointer.
-		*xbits |= (bitsPointer << 2) << shift
+		// The bitmap byte is shared with the one-word object
+		// next to it, and concurrent GC might be marking that
+		// object, so we must use an atomic update.
+		atomicor8(xbits, (bitsPointer<<2)<<shift)
 		goto marked
 	}
 	if typ.kind&kindGCProg != 0 {
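Why the one-word case needs the atomic: the heap bitmap packs metadata for two adjacent heap words into each byte, so two neighboring one-word objects share a bitmap byte. A small sketch of that address arithmetic, mirroring the computation visible in the unrollgcproginplace_m hunk below, with the Go 1.4-era constants assumed for illustration:

	package main

	import "fmt"

	// Heap bitmap layout constants as in the Go 1.4-era runtime
	// (assumed here for illustration): 4 bits of metadata per heap
	// word, so each bitmap byte describes two one-word objects.
	const (
		ptrSize            = 8 // bytes per heap word on 64-bit
		gcBits             = 4 // metadata bits per heap word
		wordsPerBitmapByte = 8 / gcBits
	)

	func main() {
		const arenaStart = 0x10000000 // hypothetical arena base
		// Two adjacent one-word objects.
		for _, addr := range []uintptr{arenaStart, arenaStart + ptrSize} {
			off := (addr - arenaStart) / ptrSize
			// The bitmap grows down from arenaStart, one byte per two words.
			byteAddr := arenaStart - off/wordsPerBitmapByte - 1
			shift := (off % wordsPerBitmapByte) * gcBits
			fmt.Printf("word %#x -> bitmap byte %#x, shift %d\n",
				addr, byteAddr, shift)
		}
		// Both words map to the same bitmap byte (shifts 0 and 4), which
		// is why mallocgc must OR its nibble in atomically.
	}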
@@ -391,15 +391,10 @@ func gcmarknewobject_m(obj uintptr) {
 	}
 
-	// Each byte of GC bitmap holds info for two words.
-	// If the current object is larger than two words, or if the object is one word
-	// but the object it shares the byte with is already marked,
-	// then all the possible concurrent updates are trying to set the same bit,
-	// so we can use a non-atomic update.
-	if mbits.xbits&(bitMask|(bitMask<<gcBits)) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
-		*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
-	} else {
-		atomicor8(mbits.bitp, bitMarked<<mbits.shift)
-	}
+	// Might be racing with other updates, so use atomic update always.
+	// We used to be clever here and use a non-atomic update in certain
+	// cases, but it's not worth the risk.
+	atomicor8(mbits.bitp, bitMarked<<mbits.shift)
 }
 
 // obj is the start of an object with mark mbits.
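The deleted fast path performed a plain read-modify-write whenever it believed no other updater could touch the byte; under concurrent GC that belief can be wrong. A standalone demonstration of the failure mode (illustrative only, not runtime code): two goroutines each OR their own nibble into a shared byte non-atomically, and a mark bit can be lost. Running it under go run -race reports the race directly even when no loss happens to be observed.

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		// Two one-word "objects" whose mark bits live in the same byte:
		// object A owns bit 0, object B owns bit 4.
		lost := 0
		for trial := 0; trial < 100000; trial++ {
			var b uint8
			var wg sync.WaitGroup
			for _, mask := range []uint8{1 << 0, 1 << 4} {
				wg.Add(1)
				go func(m uint8) {
					defer wg.Done()
					b |= m // non-atomic read-modify-write: racy
				}(mask)
			}
			wg.Wait()
			if b != 0x11 { // one goroutine's bit was overwritten
				lost++
			}
		}
		// Often nonzero on multicore machines; -race flags it regardless.
		fmt.Printf("lost a mark bit in %d of 100000 trials\n", lost)
	}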
@@ -451,16 +446,11 @@ func greyobject(obj uintptr, base, off uintptr, mbits *markbits, wbuf *workbuf)
 	}
 
-	// Each byte of GC bitmap holds info for two words.
-	// If the current object is larger than two words, or if the object is one word
-	// but the object it shares the byte with is already marked,
-	// then all the possible concurrent updates are trying to set the same bit,
-	// so we can use a non-atomic update.
-	if mbits.xbits&(bitMask|bitMask<<gcBits) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
-		*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
-	} else {
-		atomicor8(mbits.bitp, bitMarked<<mbits.shift)
-	}
+	// Might be racing with other updates, so use atomic update always.
+	// We used to be clever here and use a non-atomic update in certain
+	// cases, but it's not worth the risk.
+	atomicor8(mbits.bitp, bitMarked<<mbits.shift)
 
 	if !checkmark && (mbits.xbits>>(mbits.shift+2))&_BitsMask == _BitsDead {
 		return wbuf // noscan object
@@ -865,7 +855,7 @@ func getfull(b *workbuf) *workbuf {
 	if b == nil {
 		b = (*workbuf)(lfstackpop(&work.partial))
 	}
-	if b != nil || work.nproc == 1 {
+	if b != nil {
 		return b
 	}
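getfull takes work buffers from lock-free stacks via lfstackpop, and the fix drops the work.nproc == 1 shortcut because even a single GC proc shares those stacks with running mutators. For readers unfamiliar with the structure, a Treiber-stack sketch of the general technique; this is an illustrative stand-in, not the runtime's lfstack, which packs node pointers and ABA counters into uint64s:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// node is one entry in a Treiber (lock-free LIFO) stack, the shape
	// of structure the runtime uses for GC work buffers. Without the
	// runtime's ABA counter, popping and re-pushing the same node from
	// another goroutine could misbehave in a general setting; this
	// sketch keeps things simple.
	type node struct {
		next atomic.Pointer[node]
		val  int
	}

	type stack struct {
		top atomic.Pointer[node]
	}

	func (s *stack) push(n *node) {
		for {
			old := s.top.Load()
			n.next.Store(old)
			if s.top.CompareAndSwap(old, n) {
				return
			}
		}
	}

	func (s *stack) pop() *node {
		for {
			old := s.top.Load()
			if old == nil {
				return nil
			}
			if s.top.CompareAndSwap(old, old.next.Load()) {
				return old
			}
		}
	}

	func main() {
		var s stack
		// Even with one "GC worker" popping, mutators may be pushing
		// concurrently, so every operation stays a CAS: there is no
		// safe single-threaded fast path to take.
		for i := 0; i < 3; i++ {
			s.push(&node{val: i})
		}
		for n := s.pop(); n != nil; n = s.pop() {
			fmt.Println(n.val)
		}
	}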
@@ -2336,7 +2326,12 @@ func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
 	off := (uintptr(v) - arena_start) / ptrSize
 	bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
 	shift := (off % wordsPerBitmapByte) * gcBits
-	*bitp |= bitBoundary << shift
+
+	// NOTE(rsc): An argument can be made that unrollgcproginplace
+	// is only used for very large objects, and in particular it is not used
+	// for 1-word objects, so the atomic here is not necessary.
+	// But if that's true, neither is the shift, and yet here it is.
+	atomicor8(bitp, bitBoundary<<shift)
 
 	// Mark word after last as BitsDead.
 	if size0 < size {
@@ -634,21 +634,39 @@ func newstack() {
 		throw("runtime: stack split at bad time")
 	}
 
-	// The goroutine must be executing in order to call newstack,
-	// so it must be Grunning or Gscanrunning.
-
 	gp := thisg.m.curg
 	morebuf := thisg.m.morebuf
 	thisg.m.morebuf.pc = 0
 	thisg.m.morebuf.lr = 0
 	thisg.m.morebuf.sp = 0
 	thisg.m.morebuf.g = 0
-	rewindmorestack(&gp.sched)
+
+	// Be conservative about where we preempt.
+	// We are interested in preempting user Go code, not runtime code.
+	// If we're holding locks, mallocing, or GCing, don't preempt.
+	// This check is very early in newstack so that even the status change
+	// from Grunning to Gwaiting and back doesn't happen in this case.
+	// That status change by itself can be viewed as a small preemption,
+	// because the GC might change Gwaiting to Gscanwaiting, and then
+	// this goroutine has to wait for the GC to finish before continuing.
+	// If the GC is in some way dependent on this goroutine (for example,
+	// it needs a lock held by the goroutine), that small preemption turns
+	// into a real deadlock.
+	if gp.stackguard0 == stackPreempt {
+		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
+			// Let the goroutine keep running for now.
+			// gp->preempt is set, so it will be preempted next time.
+			gp.stackguard0 = gp.stack.lo + _StackGuard
+			gogo(&gp.sched) // never return
+		}
+	}
+
+	// The goroutine must be executing in order to call newstack,
+	// so it must be Grunning (or Gscanrunning).
 	casgstatus(gp, _Grunning, _Gwaiting)
 	gp.waitreason = "stack growth"
 
+	rewindmorestack(&gp.sched)
+
 	if gp.stack.lo == 0 {
 		throw("missing stack in newstack")
 	}
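The ordering in this hunk is the whole point: decide whether preemption is safe before publishing any status change the GC might act on. A schematic of the before/after control flow; every name here is hypothetical, not the runtime's:

	package main

	import "fmt"

	type status int

	const (
		gRunning status = iota
		gWaiting
	)

	type gState struct {
		status           status
		preemptRequested bool
		holdingLocks     bool
	}

	func safeToPreempt(g *gState) bool { return !g.holdingLocks }

	// Before the fix: publish the status change first, then discover
	// the preemption is unsafe. While status == gWaiting, the GC may
	// grab the goroutine (Gwaiting -> Gscanwaiting) and block it; if
	// the GC also needs a lock this goroutine holds, that's a deadlock.
	func newstackOld(g *gState) {
		g.status = gWaiting // visible to GC: a "small preemption"
		if g.preemptRequested && !safeToPreempt(g) {
			g.status = gRunning // too late: GC may have seen gWaiting
			return              // resume the goroutine
		}
		// ... grow the stack or preempt ...
	}

	// After the fix: refuse unsafe preemption before any status change,
	// so a goroutine holding runtime locks never looks preemptible.
	func newstackNew(g *gState) {
		if g.preemptRequested && !safeToPreempt(g) {
			return // keep running; the preempt flag stays set for next time
		}
		g.status = gWaiting
		// ... grow the stack or preempt ...
	}

	func main() {
		g := &gState{preemptRequested: true, holdingLocks: true}
		newstackNew(g)
		fmt.Println("status after unsafe preempt attempt:", g.status) // gRunning
	}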
@@ -697,16 +715,6 @@ func newstack() {
 		gogo(&gp.sched) // never return
 	}
 
-	// Be conservative about where we preempt.
-	// We are interested in preempting user Go code, not runtime code.
-	if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
-		// Let the goroutine keep running for now.
-		// gp->preempt is set, so it will be preempted next time.
-		gp.stackguard0 = gp.stack.lo + _StackGuard
-		casgstatus(gp, _Gwaiting, _Grunning)
-		gogo(&gp.sched) // never return
-	}
-
 	// Act like goroutine called runtime.Gosched.
 	casgstatus(gp, _Gwaiting, _Grunning)
 	gosched_m(gp) // never return