mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
runtime: move internal GC statistics from memstats to gcController
This change moves certain important but internal-only GC statistics from
memstats into gcController. These statistics are mainly used in pacing
the GC, so it makes sense to keep them in the pacer's state.
This CL was mostly generated via
rf '
ex . {
memstats.gc_trigger -> gcController.trigger
memstats.triggerRatio -> gcController.triggerRatio
memstats.heap_marked -> gcController.heapMarked
memstats.heap_live -> gcController.heapLive
memstats.heap_scan -> gcController.heapScan
}
'
except for a few special cases, like updating names in comments and when
these fields are used within gcControllerState methods (at which point
they're accessed through the receiver).
For #44167.
Change-Id: I6bd1602585aeeb80818ded24c07d8e6fec992b93
Reviewed-on: https://go-review.googlesource.com/c/go/+/306598
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
This commit is contained in:
parent
8c2a8b1771
commit
f2d5bd1ad3
7 changed files with 146 additions and 138 deletions
|
|
@ -178,9 +178,9 @@ func (c *mcache) refill(spc spanClass) {
|
|||
atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
|
||||
memstats.heapStats.release()
|
||||
|
||||
// Update heap_live with the same assumption.
|
||||
// Update gcController.heapLive with the same assumption.
|
||||
usedBytes := uintptr(s.allocCount) * s.elemsize
|
||||
atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
|
||||
atomic.Xadd64(&gcController.heapLive, int64(s.npages*pageSize)-int64(usedBytes))
|
||||
|
||||
// Flush tinyAllocs.
|
||||
if spc == tinySpanClass {
|
||||
|
|
@ -190,15 +190,15 @@ func (c *mcache) refill(spc spanClass) {
|
|||
|
||||
// While we're here, flush scanAlloc, since we have to call
|
||||
// revise anyway.
|
||||
atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
|
||||
atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
|
||||
c.scanAlloc = 0
|
||||
|
||||
if trace.enabled {
|
||||
// heap_live changed.
|
||||
// gcController.heapLive changed.
|
||||
traceHeapAlloc()
|
||||
}
|
||||
if gcBlackenEnabled != 0 {
|
||||
// heap_live and heap_scan changed.
|
||||
// gcController.heapLive and heapScan changed.
|
||||
gcController.revise()
|
||||
}
|
||||
|
||||
|
|
@ -230,10 +230,10 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
|
|||
atomic.Xadduintptr(&stats.largeAllocCount, 1)
|
||||
memstats.heapStats.release()
|
||||
|
||||
// Update heap_live and revise pacing if needed.
|
||||
atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
|
||||
// Update gcController.heapLive and revise pacing if needed.
|
||||
atomic.Xadd64(&gcController.heapLive, int64(npages*pageSize))
|
||||
if trace.enabled {
|
||||
// Trace that a heap alloc occurred because heap_live changed.
|
||||
// Trace that a heap alloc occurred because gcController.heapLive changed.
|
||||
traceHeapAlloc()
|
||||
}
|
||||
if gcBlackenEnabled != 0 {
|
||||
|
|
@ -250,7 +250,7 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
|
|||
|
||||
func (c *mcache) releaseAll() {
|
||||
// Take this opportunity to flush scanAlloc.
|
||||
atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
|
||||
atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
|
||||
c.scanAlloc = 0
|
||||
|
||||
sg := mheap_.sweepgen
|
||||
|
|
@ -263,14 +263,14 @@ func (c *mcache) releaseAll() {
|
|||
atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
|
||||
memstats.heapStats.release()
|
||||
if s.sweepgen != sg+1 {
|
||||
// refill conservatively counted unallocated slots in heap_live.
|
||||
// refill conservatively counted unallocated slots in gcController.heapLive.
|
||||
// Undo this.
|
||||
//
|
||||
// If this span was cached before sweep, then
|
||||
// heap_live was totally recomputed since
|
||||
// gcController.heapLive was totally recomputed since
|
||||
// caching this span, so we don't do this for
|
||||
// stale spans.
|
||||
atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
|
||||
atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
|
||||
}
|
||||
// Release the span to the mcentral.
|
||||
mheap_.central[i].mcentral.uncacheSpan(s)
|
||||
|
|
@ -283,7 +283,7 @@ func (c *mcache) releaseAll() {
|
|||
atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
|
||||
c.tinyAllocs = 0
|
||||
|
||||
// Updated heap_scan and possible heap_live.
|
||||
// Updated heapScan and possible gcController.heapLive.
|
||||
if gcBlackenEnabled != 0 {
|
||||
gcController.revise()
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue