Mirror of https://github.com/golang/go.git
runtime: atomically set span state and use as publication barrier
When everything is working correctly, any pointer the garbage collector encounters can only point into a fully initialized heap span, since the span must have been initialized before that pointer could escape the heap allocator and become visible to the GC.

However, in various cases, we try to be defensive against bad pointers. In findObject, this is just a sanity check: we never expect to find a bad pointer, but programming errors can lead to them. In spanOfHeap, we don't necessarily trust the pointer and we're trying to check if it really does point to the heap, though it should always point to something. Conservative scanning takes this to a new level, since it can only guess that a word may be a pointer and verify this.

In all of these cases, we have a problem that the span lookup and check can race with span initialization, since the span becomes visible to lookups before it's fully initialized.

Furthermore, we're about to start initializing the span without the heap lock held, which is going to introduce races where accesses were previously protected by the heap lock.

To address this, this CL makes accesses to mspan.state atomic, and ensures that the span is fully initialized before setting the state to mSpanInUse. All loads are now atomic, and in any case where we don't trust the pointer, it first atomically loads the span state and checks that it's mSpanInUse, after which it will have synchronized with span initialization and can safely check the other span fields.

For #10958, #24543, but a good fix in general.

Change-Id: I518b7c63555b02064b98aa5f802c92b758fef853
Reviewed-on: https://go-review.googlesource.com/c/go/+/203286
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Parent: a9b37ae026
Commit: 7de15e362b
9 changed files with 104 additions and 57 deletions
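The ordering the commit message describes can be sketched outside the runtime. The example below is illustrative only, not the runtime's code: the span type, the state constants, and the publish/contains helpers are hypothetical, and sync/atomic's Uint32 stands in for the 8-bit runtime-internal atomics used on mspan.state. It shows both halves of the pattern: the writer initializes every field before atomically storing the in-use state, and a reader that does not trust its pointer loads the state atomically before touching any other field.

package main

import (
	"fmt"
	"sync/atomic"
)

// Hypothetical states; the runtime uses mSpanDead, mSpanInUse, etc.
const (
	stateDead  uint32 = iota // span not initialized
	stateInUse               // span fully initialized and published
)

// span is a stand-in for the runtime's mspan.
type span struct {
	base, limit uintptr
	state       atomic.Uint32 // only ever accessed atomically
}

// publish fills in the fields and then atomically sets the state.
// The atomic store is the publication barrier: a reader that observes
// stateInUse is guaranteed to also observe base and limit.
func (s *span) publish(base, limit uintptr) {
	s.base = base
	s.limit = limit
	s.state.Store(stateInUse) // must come last
}

// contains validates an untrusted pointer: state first, then bounds.
func (s *span) contains(p uintptr) bool {
	if s.state.Load() != stateInUse {
		return false // unpublished; other fields may not be initialized
	}
	return p >= s.base && p < s.limit
}

func main() {
	var s span
	fmt.Println(s.contains(0x2000)) // false: span not published yet
	s.publish(0x1000, 0x3000)
	fmt.Println(s.contains(0x2000)) // true
}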
@@ -305,6 +305,14 @@ type arenaHint struct {
 // * During GC (gcphase != _GCoff), a span *must not* transition from
 // manual or in-use to free. Because concurrent GC may read a pointer
 // and then look up its span, the span state must be monotonic.
+//
+// Setting mspan.state to mSpanInUse or mSpanManual must be done
+// atomically and only after all other span fields are valid.
+// Likewise, if inspecting a span is contingent on it being
+// mSpanInUse, the state should be loaded atomically and checked
+// before depending on other fields. This allows the garbage collector
+// to safely deal with potentially invalid pointers, since resolving
+// such pointers may race with a span being allocated.
 type mSpanState uint8
 
 const (
@@ -323,6 +331,21 @@ var mSpanStateNames = []string{
 	"mSpanFree",
 }
 
+// mSpanStateBox holds an mSpanState and provides atomic operations on
+// it. This is a separate type to disallow accidental comparison or
+// assignment with mSpanState.
+type mSpanStateBox struct {
+	s mSpanState
+}
+
+func (b *mSpanStateBox) set(s mSpanState) {
+	atomic.Store8((*uint8)(&b.s), uint8(s))
+}
+
+func (b *mSpanStateBox) get() mSpanState {
+	return mSpanState(atomic.Load8((*uint8)(&b.s)))
+}
+
 // mSpanList heads a linked list of spans.
 //
 //go:notinheap
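The mSpanStateBox added above is a small encapsulation trick: because the state lives in its own struct type that only exposes get/set methods, code can no longer write or compare mspan.state non-atomically by accident. A rough user-space analogue of that idea is sketched below; the package and names are invented, and sync/atomic's Uint32 replaces the runtime-internal atomic.Store8/Load8, so this is a sketch of the pattern rather than the runtime's type.

package mspanbox

import "sync/atomic"

// spanState mirrors the idea of the runtime's mSpanState.
type spanState uint8

const (
	spanDead spanState = iota
	spanInUse
	spanManual
	spanFree
)

// spanStateBox holds a spanState and only exposes atomic accessors.
// Keeping the field unexported and the type distinct from spanState
// makes an accidental "box == spanInUse" or "box = spanInUse" a
// compile error, forcing every access through get/set.
type spanStateBox struct {
	s atomic.Uint32 // 32-bit stand-in for the runtime's 8-bit atomics
}

func (b *spanStateBox) set(s spanState) { b.s.Store(uint32(s)) }

func (b *spanStateBox) get() spanState { return spanState(b.s.Load()) }

In the runtime itself the box wraps a raw mSpanState and uses atomic.Load8/Store8, as the hunk above shows; only the encapsulation idea carries over here.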
@@ -404,19 +427,19 @@ type mspan struct {
 	// h->sweepgen is incremented by 2 after every GC
 
 	sweepgen    uint32
-	divMul      uint16     // for divide by elemsize - divMagic.mul
-	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
-	allocCount  uint16     // number of allocated objects
-	spanclass   spanClass  // size class and noscan (uint8)
-	state       mSpanState // mspaninuse etc
-	needzero    uint8      // needs to be zeroed before allocation
-	divShift    uint8      // for divide by elemsize - divMagic.shift
-	divShift2   uint8      // for divide by elemsize - divMagic.shift2
-	scavenged   bool       // whether this span has had its pages released to the OS
-	elemsize    uintptr    // computed from sizeclass or from npages
-	limit       uintptr    // end of data in span
-	speciallock mutex      // guards specials list
-	specials    *special   // linked list of special records sorted by offset.
+	divMul      uint16        // for divide by elemsize - divMagic.mul
+	baseMask    uint16        // if non-0, elemsize is a power of 2, & this will get object allocation base
+	allocCount  uint16        // number of allocated objects
+	spanclass   spanClass     // size class and noscan (uint8)
+	state       mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
+	needzero    uint8         // needs to be zeroed before allocation
+	divShift    uint8         // for divide by elemsize - divMagic.shift
+	divShift2   uint8         // for divide by elemsize - divMagic.shift2
+	scavenged   bool          // whether this span has had its pages released to the OS
+	elemsize    uintptr       // computed from sizeclass or from npages
+	limit       uintptr       // end of data in span
+	speciallock mutex         // guards specials list
+	specials    *special      // linked list of special records sorted by offset.
 }
 
 func (s *mspan) base() uintptr {
@@ -483,7 +506,7 @@ func (h *mheap) coalesce(s *mspan) {
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
 		h.free.removeSpan(other)
-		other.state = mSpanDead
+		other.state.set(mSpanDead)
 		h.spanalloc.free(unsafe.Pointer(other))
 	}
 
@@ -525,7 +548,7 @@ func (h *mheap) coalesce(s *mspan) {
 
 	// Coalesce with earlier, later spans.
 	var hpBefore uintptr
-	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
+	if before := spanOf(s.base() - 1); before != nil && before.state.get() == mSpanFree {
 		if s.scavenged == before.scavenged {
 			hpBefore = before.hugePages()
 			merge(before, s, before)
@@ -536,7 +559,7 @@ func (h *mheap) coalesce(s *mspan) {
 
 	// Now check to see if next (greater addresses) span is free and can be coalesced.
 	var hpAfter uintptr
-	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
+	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state.get() == mSpanFree {
 		if s.scavenged == after.scavenged {
 			hpAfter = after.hugePages()
 			merge(s, after, after)
@@ -733,7 +756,7 @@ func inHeapOrStack(b uintptr) bool {
 	if s == nil || b < s.base() {
 		return false
 	}
-	switch s.state {
+	switch s.state.get() {
 	case mSpanInUse, mSpanManual:
 		return b < s.limit
 	default:
@@ -800,9 +823,12 @@ func spanOfUnchecked(p uintptr) *mspan {
 //go:nosplit
 func spanOfHeap(p uintptr) *mspan {
 	s := spanOf(p)
-	// If p is not allocated, it may point to a stale span, so we
-	// have to check the span's bounds and state.
-	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
+	// s is nil if it's never been allocated. Otherwise, we check
+	// its state first because we don't trust this pointer, so we
+	// have to synchronize with span initialization. Then, it's
+	// still possible we picked up a stale span pointer, so we
+	// have to check the span's bounds.
+	if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
 		return nil
 	}
 	return s
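The reordered condition in spanOfHeap matters because Go's || short-circuits left to right: by placing the atomic state check before the bounds checks, base and limit are only read after the state load has synchronized with the span's initialization. A lookup over untrusted pointers might be sketched like this, reusing the hypothetical span type and sync/atomic stand-in from the earlier sketch (the page-shifted map lookup is likewise only a stand-in for the runtime's spanOf):

// lookupHeap mimics the shape of spanOfHeap for the hypothetical span
// type from the earlier sketch: resolve the candidate span, then check
// nil, then state (atomically), and only then the bounds. Checking
// bounds first would read base/limit before synchronizing with the
// writer's publication, which is the race this CL closes.
func lookupHeap(spans map[uintptr]*span, p uintptr) *span {
	s := spans[p>>12] // stand-in for the runtime's page-indexed spanOf lookup
	if s == nil || s.state.Load() != stateInUse || p < s.base || p >= s.limit {
		return nil
	}
	return s
}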
@@ -1042,7 +1068,6 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		// able to map interior pointer to containing span.
 		atomic.Store(&s.sweepgen, h.sweepgen)
 		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
-		s.state = mSpanInUse
 		s.allocCount = 0
 		s.spanclass = spanclass
 		s.elemsize = elemSize
@@ -1066,6 +1091,18 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		s.gcmarkBits = gcmarkBits
 		s.allocBits = allocBits
 
+		// Now that the span is filled in, set its state. This
+		// is a publication barrier for the other fields in
+		// the span. While valid pointers into this span
+		// should never be visible until the span is returned,
+		// if the garbage collector finds an invalid pointer,
+		// access to the span may race with initialization of
+		// the span. We resolve this race by atomically
+		// setting the state after the span is fully
+		// initialized, and atomically checking the state in
+		// any situation where a pointer is suspect.
+		s.state.set(mSpanInUse)
+
 		// Mark in-use span in arena page bitmap.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
 		arena.pageInUse[pageIdx] |= pageMask
@@ -1143,13 +1180,13 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
 	s := h.allocSpanLocked(npage, stat)
 	if s != nil {
-		s.state = mSpanManual
 		s.manualFreeList = 0
 		s.allocCount = 0
 		s.spanclass = 0
 		s.nelems = 0
 		s.elemsize = 0
 		s.limit = s.base() + s.npages<<_PageShift
+		s.state.set(mSpanManual) // Publish the span
 		// Manually managed memory doesn't count toward heap_sys.
 		memstats.heap_sys -= uint64(s.npages << _PageShift)
 	}
@@ -1201,7 +1238,7 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 
 HaveSpan:
 	s := t.span()
-	if s.state != mSpanFree {
+	if s.state.get() != mSpanFree {
 		throw("candidate mspan for allocation is not free")
 	}
 
@@ -1332,7 +1369,7 @@ func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
 	s := (*mspan)(h.spanalloc.alloc())
 	s.init(uintptr(v), size/pageSize)
 	h.setSpans(s.base(), s.npages, s)
-	s.state = mSpanFree
+	s.state.set(mSpanFree)
 	// [v, v+size) is always in the Prepared state. The new span
 	// must be marked scavenged so the allocator transitions it to
 	// Ready when allocating from it.
@@ -1395,7 +1432,7 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 }
 
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
-	switch s.state {
+	switch s.state.get() {
 	case mSpanManual:
 		if s.allocCount != 0 {
 			throw("mheap.freeSpanLocked - invalid stack free")
@@ -1420,7 +1457,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 	if acctidle {
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
-	s.state = mSpanFree
+	s.state.set(mSpanFree)
 
 	// Coalesce span with neighbors.
 	h.coalesce(s)
@@ -1481,7 +1518,7 @@ func (h *mheap) scavengeSplit(t treapIter, size uintptr) *mspan {
 		h.setSpan(n.base(), n)
 		h.setSpan(n.base()+nbytes-1, n)
 		n.needzero = s.needzero
-		n.state = s.state
+		n.state.set(s.state.get())
 	})
 	return n
 }
@@ -1580,7 +1617,6 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.allocCount = 0
 	span.spanclass = 0
 	span.elemsize = 0
-	span.state = mSpanDead
 	span.scavenged = false
 	span.speciallock.key = 0
 	span.specials = nil
@@ -1588,6 +1624,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.freeindex = 0
 	span.allocBits = nil
 	span.gcmarkBits = nil
+	span.state.set(mSpanDead)
 }
 
 func (span *mspan) inList() bool {