mirror of https://github.com/golang/go.git
runtime: ensure heap memstats are updated atomically

For the most part, heap memstats are already updated atomically when
passed down to OS-level memory functions (e.g. sysMap). Elsewhere,
however, they're updated with the heap lock.

In order to facilitate holding the heap lock for less time during
allocation paths, this change more consistently makes the update of
these statistics atomic by calling mSysStat{Inc,Dec} appropriately
instead of simply adding or subtracting. It also ensures these values
are loaded atomically.

Furthermore, an undocumented but safe update condition for these
memstats is during STW, at which point using atomics is unnecessary.
This change also documents this condition in mstats.go.

Updates #35112.

Change-Id: I87d0b6c27b98c88099acd2563ea23f8da1239b66
Reviewed-on: https://go-review.googlesource.com/c/go/+/196638
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent 814c5058bb
commit ae4534e659
3 changed files with 19 additions and 18 deletions
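For readers unfamiliar with the helpers this change standardizes on: mSysStatInc and mSysStatDec atomically add to or subtract from a memstat field. The real implementations live inside the runtime, use runtime/internal/atomic, and abort the process on overflow; the following is only a simplified stand-alone sketch of their shape using sync/atomic.

package main

import (
	"fmt"
	"sync/atomic"
)

// mSysStatInc atomically adds n bytes to the stat *sysStat points at.
// Sketch only: the runtime's real helper uses runtime/internal/atomic
// and includes an overflow check that exits the process.
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	atomic.AddUint64(sysStat, uint64(n))
}

// mSysStatDec atomically subtracts n bytes from *sysStat by adding the
// two's complement of n.
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	atomic.AddUint64(sysStat, ^uint64(n-1))
}

func main() {
	var heapReleased uint64
	mSysStatInc(&heapReleased, 4<<20) // e.g. scavenged 4 MiB
	mSysStatDec(&heapReleased, 1<<20) // reused 1 MiB of it
	fmt.Println(heapReleased >> 20)   // 3
}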
src/runtime/mgcscavenge.go

@@ -57,6 +57,7 @@ package runtime
 
 import (
 	"math/bits"
+	"runtime/internal/atomic"
 	"unsafe"
 )
 
@@ -78,10 +79,8 @@ const (
 )
 
 // heapRetained returns an estimate of the current heap RSS.
-//
-// mheap_.lock must be held or the world must be stopped.
 func heapRetained() uint64 {
-	return memstats.heap_sys - memstats.heap_released
+	return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
 }
 
 // gcPaceScavenger updates the scavenger's pacing, particularly
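Note that atomic writes alone would not be enough: once heap_sys and heap_released can change without the heap lock held, readers such as heapRetained must load them atomically too, which is exactly what the new body does. A minimal stand-alone illustration of this load/add pairing, using a hypothetical stats type and sync/atomic in place of the runtime-internal package:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// stats mirrors the shape of the two fields heapRetained reads.
// Hypothetical example type, not the runtime's mstats.
type stats struct {
	heapSys      uint64
	heapReleased uint64
}

// retained is the analogue of heapRetained: both fields are loaded
// atomically, so it may run concurrently with lock-free writers.
func (s *stats) retained() uint64 {
	return atomic.LoadUint64(&s.heapSys) - atomic.LoadUint64(&s.heapReleased)
}

func main() {
	var s stats
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // writer: atomic updates, no lock held
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			atomic.AddUint64(&s.heapSys, 8192)
			atomic.AddUint64(&s.heapReleased, 4096)
		}
	}()
	for i := 0; i < 1000; i++ {
		_ = s.retained() // safe concurrent read
	}
	wg.Wait()
	fmt.Println(s.retained()) // 1000 * (8192 - 4096) = 4096000
}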
@@ -489,7 +488,7 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
 
 	// Update global accounting only when not in test, otherwise
 	// the runtime's accounting will be wrong.
-	memstats.heap_released += uint64(npages) * pageSize
+	mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
 }
 
 // fillAligned returns x but with all zeroes in m-aligned
src/runtime/mheap.go

@@ -1003,7 +1003,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 		s.limit = s.base() + s.npages<<_PageShift
 		s.state.set(mSpanManual) // Publish the span
 		// Manually managed memory doesn't count toward heap_sys.
-		memstats.heap_sys -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
 	}
 
 	// This unlock acts as a release barrier. See mheap.alloc_m.
@@ -1113,7 +1113,7 @@ HaveBase:
 		// sysUsed all the pages that are actually available
 		// in the span.
 		sysUsed(unsafe.Pointer(base), npage*pageSize)
-		memstats.heap_released -= uint64(scav)
+		mSysStatDec(&memstats.heap_released, scav)
 	}
 
 	s := (*mspan)(h.spanalloc.alloc())
@@ -1123,8 +1123,10 @@ HaveBase:
 	}
 	h.setSpans(s.base(), npage, s)
 
-	*stat += uint64(npage << _PageShift)
-	memstats.heap_idle -= uint64(npage << _PageShift)
+	// Update stats.
+	nbytes := npage * pageSize
+	mSysStatInc(stat, nbytes)
+	mSysStatDec(&memstats.heap_idle, nbytes)
 
 	return s
 }
@@ -1172,8 +1174,8 @@ func (h *mheap) grow(npage uintptr) bool {
 		// The allocation is always aligned to the heap arena
 		// size which is always > physPageSize, so its safe to
 		// just add directly to heap_released.
-		memstats.heap_released += uint64(asize)
-		memstats.heap_idle += uint64(asize)
+		mSysStatInc(&memstats.heap_released, asize)
+		mSysStatInc(&memstats.heap_idle, asize)
 
 		// Recalculate nBase
 		nBase = alignUp(h.curArena.base+ask, physPageSize)
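Context for the unchanged line at the bottom of this hunk: alignUp is the runtime's round-up helper for power-of-two alignments. A stand-alone sketch of its shape, shown here for reference:

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a must be a power of
// two: add a-1, then clear the low bits with AND NOT.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	fmt.Println(alignUp(4097, 4096)) // 8192
	fmt.Println(alignUp(8192, 4096)) // 8192 (already aligned)
}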
@@ -1237,8 +1239,8 @@ func (h *mheap) freeSpan(s *mspan) {
 func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
-	*stat -= uint64(s.npages << _PageShift)
-	memstats.heap_sys += uint64(s.npages << _PageShift)
+	mSysStatDec(stat, s.npages*pageSize)
+	mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
 	h.freeSpanLocked(s, false, true)
 	unlock(&h.lock)
 }
@@ -1264,10 +1266,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 	}
 
 	if acctinuse {
-		memstats.heap_inuse -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
 	}
 	if acctidle {
-		memstats.heap_idle += uint64(s.npages << _PageShift)
+		mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
 	}
 
 	// Mark the space as free.
src/runtime/mstats.go

@@ -31,7 +31,7 @@ type mstats struct {
 	nfree   uint64 // number of frees
 
 	// Statistics about malloc heap.
-	// Protected by mheap.lock
+	// Updated atomically, or with the world stopped.
 	//
 	// Like MemStats, heap_sys and heap_inuse do not count memory
 	// in manually-managed spans.
@@ -47,15 +47,15 @@ type mstats struct {
 
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
-	stacks_inuse uint64 // bytes in manually-managed stack spans
+	stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
 	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
 	mspan_inuse  uint64 // mspan structures
 	mspan_sys    uint64
 	mcache_inuse uint64 // mcache structures
 	mcache_sys   uint64
 	buckhash_sys uint64 // profiling bucket hash table
-	gc_sys       uint64
-	other_sys    uint64
+	gc_sys       uint64 // updated atomically or during STW
+	other_sys    uint64 // updated atomically or during STW
 
 	// Statistics about garbage collector.
 	// Protected by mheap or stopping the world during GC.
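The revised comment, "Updated atomically, or with the world stopped," is the whole invariant: during STW no other goroutine runs, so plain arithmetic is safe, but everywhere else a plain read-modify-write racing with an atomic add can lose updates. A generic stand-alone demonstration (not runtime code) of why the atomic form is required:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var stat uint64
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100000; j++ {
				// A plain stat++ here would be a data race and
				// could silently lose increments.
				atomic.AddUint64(&stat, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&stat)) // always 400000 with atomics
}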