runtime: make alloc count metrics truly monotonic
Right now we export alloc count metrics via the runtime/metrics package
and mark them as monotonic, but that's not actually true. As an
optimization, the runtime assumes a span is always fully allocated
before being uncached, and updates the accounting as such. In the rare
case that it's wrong, the span has enough information to back out what
did not get allocated.

This change uses 16 bits of padding in the mspan to house another field
that represents the number of mspan slots filled just as the mspan is
cached. This information is enough to get an exact count, allowing us
to make the metrics truly monotonic.

Change-Id: Iaff3ca43f8745dc1bbb0232372423e014b89b920
Reviewed-on: https://go-review.googlesource.com/c/go/+/377516
Reviewed-by: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
parent a0f77e56b7
commit 79db59ded9
3 changed files with 125 additions and 45 deletions
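To make the accounting change concrete before reading the diff, here is a toy model (not part of the commit) of the old and new counting schemes, for a hypothetical span with 10 slots that is cached with 3 slots already filled and uncached after 4 more allocations:

package main

import "fmt"

func main() {
	// Hypothetical span: 10 slots, cached with 3 already filled,
	// uncached with 7 filled (4 allocations happened while cached).
	const nelems, atCache, atUncache = 10, 3, 7

	// Old scheme: assume the span will be fully allocated when it is
	// cached, then back out the unused slots when it is uncached. A
	// reader sampling the metric between the two updates sees a value
	// that later decreases, so the metric was not truly monotonic.
	counterOld := 0
	counterOld += nelems - atCache   // refill: +7, optimistic
	counterOld -= nelems - atUncache // uncache: -3, back out unused

	// New scheme: record allocCountBeforeCache at refill time and add
	// exactly allocCount - allocCountBeforeCache at uncache time. The
	// counter is only ever incremented, so it is monotonic by construction.
	counterNew := 0
	counterNew += atUncache - atCache // uncache: +4, exact

	fmt.Println(counterOld, counterNew) // both settle at 4
}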
src/runtime/mcache.go

@@ -156,6 +156,25 @@ func (c *mcache) refill(spc spanClass) {
 			throw("bad sweepgen in refill")
 		}
 		mheap_.central[spc].mcentral.uncacheSpan(s)
+
+		// Count up how many slots were used and record it.
+		stats := memstats.heapStats.acquire()
+		slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
+		atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)
+
+		// Flush tinyAllocs.
+		if spc == tinySpanClass {
+			atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+			c.tinyAllocs = 0
+		}
+		memstats.heapStats.release()
+
+		// Update heapLive and flush scanAlloc.
+		gcController.update(int64(slotsUsed*s.elemsize), int64(c.scanAlloc))
+		c.scanAlloc = 0
+
+		// Clear the second allocCount just to be safe.
+		s.allocCountBeforeCache = 0
 	}

 	// Get a new cached span from the central lists.
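The hunk above reads a new s.allocCountBeforeCache field. Its declaration lives in one of the other changed files, which this page does not show; going by the commit message, a plausible sketch (an assumption, not the actual hunk) is a second 16-bit counter next to allocCount in mspan:

package runtime_sketch

// Sketch only: the real declaration is in a file not shown on this page.
// The commit message says the field occupies 16 bits of existing padding
// in mspan, so the struct layout is otherwise unchanged.
type mspan struct {
	// ... other fields elided ...
	allocCount            uint16 // number of allocated objects
	allocCountBeforeCache uint16 // allocCount when this span was last cached
	// ... other fields elided ...
}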
@@ -172,24 +191,8 @@ func (c *mcache) refill(spc spanClass) {
 	// sweeping in the next sweep phase.
 	s.sweepgen = mheap_.sweepgen + 3

-	// Assume all objects from this span will be allocated in the
-	// mcache. If it gets uncached, we'll adjust this.
-	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
-
-	// Flush tinyAllocs.
-	if spc == tinySpanClass {
-		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
-		c.tinyAllocs = 0
-	}
-	memstats.heapStats.release()
-
-	// Update heapLive with the same assumption.
-	// While we're here, flush scanAlloc, since we have to call
-	// revise anyway.
-	usedBytes := uintptr(s.allocCount) * s.elemsize
-	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
-	c.scanAlloc = 0
+	// Store the current alloc count for accounting later.
+	s.allocCountBeforeCache = s.allocCount

 	c.alloc[spc] = s
 }
@@ -235,26 +238,16 @@ func (c *mcache) releaseAll() {
 	scanAlloc := int64(c.scanAlloc)
 	c.scanAlloc = 0

-	sg := mheap_.sweepgen
-	dHeapLive := int64(0)
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
-			// Adjust nsmallalloc in case the span wasn't fully allocated.
-			n := uintptr(s.nelems) - uintptr(s.allocCount)
+			// Adjust smallAllocCount for whatever was allocated.
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+			slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
+			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
 			memstats.heapStats.release()
-			if s.sweepgen != sg+1 {
-				// refill conservatively counted unallocated slots in
-				// gcController.heapLive. Undo this.
-				//
-				// If this span was cached before sweep, then
-				// gcController.heapLive was totally recomputed since
-				// caching this span, so we don't do this for
-				// stale spans.
-				dHeapLive -= int64(n) * int64(s.elemsize)
-			}
+			s.allocCountBeforeCache = 0
+
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
 			c.alloc[i] = &emptymspan
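Note how the two halves fit together: because refill no longer pre-charges a whole span's unallocated slots into gcController.heapLive (see the second hunk above), releaseAll has nothing to undo, so dHeapLive and its stale-span special case disappear and the final update below only flushes scanAlloc.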
@@ -270,8 +263,8 @@ func (c *mcache) releaseAll() {
 	c.tinyAllocs = 0
 	memstats.heapStats.release()

-	// Updated heapScan and heapLive.
-	gcController.update(dHeapLive, scanAlloc)
+	// Updated heapScan.
+	gcController.update(0, scanAlloc)
 }

 // prepareForSweep flushes c if the system has entered a new sweep phase
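For reference, the metrics in question are the cumulative allocation counts exported by the runtime/metrics package. A minimal reader, assuming /gc/heap/allocs:objects is among the affected metrics:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Assumed here to be one of the alloc count metrics this change
	// makes truly monotonic: a cumulative count of heap allocations.
	const name = "/gc/heap/allocs:objects"

	sample := make([]metrics.Sample, 1)
	sample[0].Name = name
	metrics.Read(sample)

	if sample[0].Value.Kind() == metrics.KindUint64 {
		// After this change, successive reads of this value never decrease.
		fmt.Printf("%s = %d\n", name, sample[0].Value.Uint64())
	}
}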