runtime: ensure heap memstats are updated atomically

For the most part, heap memstats are already updated atomically when
passed down to OS-level memory functions (e.g. sysMap). Elsewhere,
however, they're updated with the heap lock.

In order to facilitate holding the heap lock for less time during
allocation paths, this change more consistently makes the update of
these statistics atomic by calling mSysStat{Inc,Dec} appropriately
instead of simply adding or subtracting. It also ensures these values
are loaded atomically.
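
As a rough illustration of the pattern (a minimal sketch using the standard
library's sync/atomic, not the runtime's internal helpers, which live in
mstats.go, use runtime/internal/atomic, and also check for overflow):

// Minimal sketch of atomic stat updates in the style of mSysStatInc,
// mSysStatDec, plus an atomic load. The package and function names are
// illustrative only; the real runtime code cannot use sync/atomic.
package memstatsketch

import "sync/atomic"

// statInc atomically adds n bytes to the stat *stat.
func statInc(stat *uint64, n uintptr) {
	atomic.AddUint64(stat, uint64(n))
}

// statDec atomically subtracts n bytes from *stat, using the
// ^(x-1) two's-complement idiom documented in sync/atomic.
func statDec(stat *uint64, n uintptr) {
	atomic.AddUint64(stat, ^(uint64(n) - 1))
}

// statLoad reads *stat without holding the heap lock.
func statLoad(stat *uint64) uint64 {
	return atomic.LoadUint64(stat)
}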

Furthermore, an undocumented but safe update condition for these
memstats is during STW, at which point using atomics is unnecessary.
This change also documents this condition in mstats.go.
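
The mstats.go hunk is not reproduced below; paraphrased (the field names are
the real ones, but the comment wording is illustrative rather than the actual
diff), the documented rule amounts to:

	// Heap "sys" stats: may be written non-atomically only while the
	// world is stopped, since no other goroutine can observe the write;
	// otherwise update via mSysStatInc/mSysStatDec and load atomically.
	heap_sys      uint64 // virtual address space obtained from the system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in mSpanInUse spans
	heap_released uint64 // bytes released to the OS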

Updates #35112.

Change-Id: I87d0b6c27b98c88099acd2563ea23f8da1239b66
Reviewed-on: https://go-review.googlesource.com/c/go/+/196638
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author: Michael Anthony Knyszek, 2019-09-18 15:03:50 +00:00 (committed by Michael Knyszek)
parent 814c5058bb
commit ae4534e659
3 changed files with 19 additions and 18 deletions

src/runtime/mheap.go

@@ -1003,7 +1003,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 		s.limit = s.base() + s.npages<<_PageShift
 		s.state.set(mSpanManual) // Publish the span
 		// Manually managed memory doesn't count toward heap_sys.
-		memstats.heap_sys -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
 	}
 
 	// This unlock acts as a release barrier. See mheap.alloc_m.
@@ -1113,7 +1113,7 @@ HaveBase:
 		// sysUsed all the pages that are actually available
 		// in the span.
 		sysUsed(unsafe.Pointer(base), npage*pageSize)
-		memstats.heap_released -= uint64(scav)
+		mSysStatDec(&memstats.heap_released, scav)
 	}
 
 	s := (*mspan)(h.spanalloc.alloc())
@@ -1123,8 +1123,10 @@ HaveBase:
 	}
 	h.setSpans(s.base(), npage, s)
 
-	*stat += uint64(npage << _PageShift)
-	memstats.heap_idle -= uint64(npage << _PageShift)
+	// Update stats.
+	nbytes := npage * pageSize
+	mSysStatInc(stat, nbytes)
+	mSysStatDec(&memstats.heap_idle, nbytes)
 
 	return s
 }
@@ -1172,8 +1174,8 @@ func (h *mheap) grow(npage uintptr) bool {
 		// The allocation is always aligned to the heap arena
 		// size which is always > physPageSize, so its safe to
 		// just add directly to heap_released.
-		memstats.heap_released += uint64(asize)
-		memstats.heap_idle += uint64(asize)
+		mSysStatInc(&memstats.heap_released, asize)
+		mSysStatInc(&memstats.heap_idle, asize)
 
 		// Recalculate nBase
 		nBase = alignUp(h.curArena.base+ask, physPageSize)
@@ -1237,8 +1239,8 @@ func (h *mheap) freeSpan(s *mspan) {
 func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
-	*stat -= uint64(s.npages << _PageShift)
-	memstats.heap_sys += uint64(s.npages << _PageShift)
+	mSysStatDec(stat, s.npages*pageSize)
+	mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
 	h.freeSpanLocked(s, false, true)
 	unlock(&h.lock)
 }
@@ -1264,10 +1266,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 	}
 
 	if acctinuse {
-		memstats.heap_inuse -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
 	}
 	if acctidle {
-		memstats.heap_idle += uint64(s.npages << _PageShift)
+		mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
 	}
 
 	// Mark the space as free.