runtime: move heapLive and heapScan updates into a method

This change moves the heapLive and heapScan updates on gcController into a
method for better testability. It is also less error-prone: previously, any
code that updated these fields had to remember to emit traces and/or call
gcController.revise; the new method handles those cases itself.

For #44167.

Change-Id: I3d6f2e7abb22def27c93feacff50162b0b074da2
Reviewed-on: https://go-review.googlesource.com/c/go/+/309275
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Author:    Michael Anthony Knyszek
Date:      2021-04-11 18:29:03 +00:00
Committer: Michael Knyszek
Parent:    4a5d78ff8b
Commit:    8e112a7c2a
2 changed files with 27 additions and 29 deletions

src/runtime/mcache.go (the only one of the two changed files shown below)

@@ -184,24 +184,13 @@ func (c *mcache) refill(spc spanClass) {
 	}
 	memstats.heapStats.release()
-	// Update gcController.heapLive with the same assumption.
-	usedBytes := uintptr(s.allocCount) * s.elemsize
-	atomic.Xadd64(&gcController.heapLive, int64(s.npages*pageSize)-int64(usedBytes))
+	// Update heapLive with the same assumption.
 	// While we're here, flush scanAlloc, since we have to call
 	// revise anyway.
-	atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
+	usedBytes := uintptr(s.allocCount) * s.elemsize
+	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
 	c.scanAlloc = 0
-	if trace.enabled {
-		// gcController.heapLive changed.
-		traceHeapAlloc()
-	}
-	if gcBlackenEnabled != 0 {
-		// gcController.heapLive and heapScan changed.
-		gcController.revise()
-	}
 	c.alloc[spc] = s
 }
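
The new gcController.update method is defined in the second changed file
(presumably mgcpacer.go), which is not shown in this diff. Below is a plausible
sketch of its body, reconstructed from the logic removed at the call sites
above and assuming a *gcControllerState receiver; the real definition may
differ in detail.

// update is a sketch (assumed shape) of the consolidated method. It applies
// both deltas and performs the bookkeeping the old call sites had to remember
// themselves: emitting a trace event when heapLive changes and revising the
// pacing while the GC is marking.
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		atomic.Xadd64(&c.heapLive, dHeapLive)
		if trace.enabled {
			// gcController.heapLive changed.
			traceHeapAlloc()
		}
	}
	if dHeapScan != 0 {
		atomic.Xadd64(&c.heapScan, dHeapScan)
	}
	if gcBlackenEnabled != 0 {
		// gcController.heapLive and heapScan changed.
		c.revise()
	}
}

With this shape, any caller that adjusts heapLive or heapScan gets the trace
event and the revise call automatically, which is exactly the forgetfulness
the commit message is guarding against.
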
@@ -230,15 +219,8 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 	atomic.Xadduintptr(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()
-	// Update gcController.heapLive and revise pacing if needed.
-	atomic.Xadd64(&gcController.heapLive, int64(npages*pageSize))
-	if trace.enabled {
-		// Trace that a heap alloc occurred because gcController.heapLive changed.
-		traceHeapAlloc()
-	}
-	if gcBlackenEnabled != 0 {
-		gcController.revise()
-	}
+	// Update heapLive.
+	gcController.update(int64(s.npages*pageSize), 0)
 	// Put the large span in the mcentral swept list so that it's
 	// visible to the background sweeper.
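
For contrast, the two call shapes introduced so far (copied from the hunks
above): refill flushes its cached scan work together with the live-heap delta,
while allocLarge never touches c.scanAlloc, so it passes a zero scan delta,
which leaves heapScan untouched.

// refill: flush the live-heap delta and the mcache's cached scan work together.
gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))

// allocLarge: only heapLive changes here; the scan delta is zero.
gcController.update(int64(s.npages*pageSize), 0)
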
@@ -250,10 +232,11 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 func (c *mcache) releaseAll() {
 	// Take this opportunity to flush scanAlloc.
-	atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
+	scanAlloc := int64(c.scanAlloc)
 	c.scanAlloc = 0
 	sg := mheap_.sweepgen
+	dHeapLive := int64(0)
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
@@ -270,7 +253,7 @@ func (c *mcache) releaseAll() {
 			// gcController.heapLive was totally recomputed since
 			// caching this span, so we don't do this for
 			// stale spans.
-			atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
+			dHeapLive -= int64(n) * int64(s.elemsize)
 		}
 		// Release the span to the mcentral.
 		mheap_.central[i].mcentral.uncacheSpan(s)
@@ -287,10 +270,8 @@ func (c *mcache) releaseAll() {
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
-	// Updated heapScan and possible gcController.heapLive.
-	if gcBlackenEnabled != 0 {
-		gcController.revise()
-	}
+	// Updated heapScan and heapLive.
+	gcController.update(dHeapLive, scanAlloc)
 }
 
 // prepareForSweep flushes c if the system has entered a new sweep phase
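
Pieced together from the three releaseAll hunks, the function now accumulates
its deltas locally and flushes them with a single call at the end. The
condensed view below elides the unchanged accounting; the lines marked as
assumed (the sweepgen guard, the computation of n, and the reset of c.alloc[i])
sit outside the hunks and are reconstructed from the surrounding comments, so
this is not the verbatim function body.

func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			// Unallocated slots in the span; assumed from the surrounding
			// code, this line sits outside the hunks shown above.
			n := uintptr(s.nelems) - uintptr(s.allocCount)
			// ... per-size-class free-count stats elided ...
			if s.sweepgen != sg+1 { // guard assumed from the surrounding code
				// Undo refill's conservative heapLive accounting; skipped
				// for stale spans, whose contribution was recomputed.
				dHeapLive -= int64(n) * int64(s.elemsize)
			}
			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan // assumed; outside the hunks shown
		}
	}
	// ... tiny allocator bookkeeping elided ...
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Updated heapScan and heapLive: one flush for everything above.
	gcController.update(dHeapLive, scanAlloc)
}

A side effect visible in the diff: the loop no longer performs an atomic add
on gcController.heapLive per cached span; the accumulated delta is applied
once at the end.
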