Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
runtime: don't flush local_tinyallocs

This change makes local_tinyallocs work like the rest of the malloc
stats: instead of flushing local_tinyallocs into memstats, the per-P
counters themselves become the source of truth.

Change-Id: I3e6cb5f1b3d086e432ce7d456895511a48e3617a
Reviewed-on: https://go-review.googlesource.com/c/go/+/246967
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
This commit is contained in:
parent a5088e76f1
commit cca3d1e553

4 changed files with 14 additions and 11 deletions
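The shape of the change, as a hedged sketch before the hunks: previously each per-P cache periodically flushed local_tinyallocs into the global memstats.tinyallocs and readers consulted the global; now the per-P counters are the source of truth and readers sum them while the world is stopped. A minimal standalone Go sketch of the new shape (cache and readTinyAllocs are illustrative names, not the runtime's actual types):

package main

import "fmt"

// cache stands in for a per-P mcache. Each P increments its own
// counter without synchronization; nothing ever flushes it.
type cache struct {
	tinyAllocs uintptr // source of truth
}

// readTinyAllocs aggregates the per-cache counters at read time.
// The runtime only takes this sum with the world stopped, so no
// cache is being mutated concurrently.
func readTinyAllocs(caches []*cache) uint64 {
	var total uint64
	for _, c := range caches {
		if c == nil {
			continue
		}
		total += uint64(c.tinyAllocs)
	}
	return total
}

func main() {
	caches := []*cache{{tinyAllocs: 3}, nil, {tinyAllocs: 5}}
	fmt.Println(readTinyAllocs(caches)) // 8
}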
src/runtime/export_test.go

@@ -339,7 +339,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		// Add in frees. readmemstats_m flushed the cached stats, so
 		// these are up-to-date.
-		var largeFree, smallFree uint64
+		var tinyAllocs, largeFree, smallFree uint64
 		for _, p := range allp {
 			c := p.mcache
 			if c == nil {
@@ -349,6 +349,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			largeFree += uint64(c.local_largefree)
 			slow.Frees += uint64(c.local_nlargefree)
 
+			// Collect tiny allocation stats.
+			tinyAllocs += uint64(c.local_tinyallocs)
+
 			// Collect per-sizeclass stats.
 			for i := 0; i < _NumSizeClasses; i++ {
 				slow.Frees += uint64(c.local_nsmallfree[i])
@@ -357,7 +360,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 				smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
 			}
 		}
-		slow.Frees += memstats.tinyallocs
+		slow.Frees += tinyAllocs
 		slow.Mallocs += slow.Frees
 
 		slow.TotalAlloc = slow.Alloc + largeFree + smallFree
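The arithmetic these hunks preserve is worth spelling out, with the caveat that this reading is inferred from the hunks alone: slow.Frees absorbs the tiny-alloc count, and then slow.Mallocs has slow.Frees folded in, so every tiny allocation is counted as both a malloc and a free. A toy model of that bookkeeping with made-up numbers (not runtime code):

package main

import "fmt"

func main() {
	// Made-up stand-ins for what ReadMemStatsSlow derives from
	// spans and the per-P caches.
	liveObjects := uint64(100) // objects found live on the heap
	smallFrees := uint64(40)   // freed small objects
	largeFrees := uint64(10)   // freed large objects
	tinyAllocs := uint64(25)   // summed from c.local_tinyallocs

	// Tiny allocs land in Frees, and Mallocs = live + Frees, so
	// each tiny alloc counts as both a malloc and a free.
	frees := smallFrees + largeFrees + tinyAllocs
	mallocs := liveObjects + frees

	fmt.Println(mallocs - frees) // 100: the live-object count is preserved
}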
src/runtime/mcache.go

@@ -34,7 +34,6 @@ type mcache struct {
 	// termination.
 	tiny       uintptr
 	tinyoffset uintptr
-	local_tinyallocs uintptr // number of tiny allocs not counted in other stats
 
 	// The rest is not accessed on every malloc.
@@ -49,6 +48,7 @@ type mcache struct {
 	// When read with stats from other mcaches and with the world
 	// stopped, the result will accurately reflect the state of the
 	// application.
+	local_tinyallocs  uintptr                  // number of tiny allocs not counted in other stats
 	local_largealloc  uintptr                  // bytes allocated for large objects
 	local_nlargealloc uintptr                  // number of large object allocations
 	local_nsmallalloc [_NumSizeClasses]uintptr // number of allocs for small objects
@@ -151,6 +151,8 @@ func (c *mcache) donate(d *mcache) {
 		d.local_nsmallfree[i] += c.local_nsmallfree[i]
 		c.local_nsmallfree[i] = 0
 	}
+	d.local_tinyallocs += c.local_tinyallocs
+	c.local_tinyallocs = 0
 }
 
 // refill acquires a new span of span class spc for c. This span will
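As the donate hunk shows, donate merges one cache's statistics into another and zeroes the source, so counts are neither lost nor double-counted when a cache is recycled; this change adds local_tinyallocs to that transfer. A self-contained sketch of the same move-and-zero convention (toy types, not the runtime's):

package main

import "fmt"

type cache struct {
	tinyAllocs uintptr
	smallFree  [4]uintptr // stands in for local_nsmallfree
}

// donate drains c's counters into d, zeroing c so the totals
// survive c being recycled without being counted twice.
func (c *cache) donate(d *cache) {
	for i := range c.smallFree {
		d.smallFree[i] += c.smallFree[i]
		c.smallFree[i] = 0
	}
	d.tinyAllocs += c.tinyAllocs
	c.tinyAllocs = 0
}

func main() {
	c := &cache{tinyAllocs: 7}
	d := &cache{tinyAllocs: 2}
	c.donate(d)
	fmt.Println(d.tinyAllocs, c.tinyAllocs) // 9 0
}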
src/runtime/mheap.go

@@ -1163,8 +1163,6 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
 	}
 	atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
 	c.local_scan = 0
-	memstats.tinyallocs += uint64(c.local_tinyallocs)
-	c.local_tinyallocs = 0
 
 	// heap_scan was been updated.
 	if gcBlackenEnabled != 0 {
@@ -1358,8 +1356,6 @@ func (h *mheap) freeSpan(s *mspan) {
 	lock(&h.lock)
 	atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
 	c.local_scan = 0
-	memstats.tinyallocs += uint64(c.local_tinyallocs)
-	c.local_tinyallocs = 0
 	if msanenabled {
 		// Tell msan that this entire span is no longer in use.
 		base := unsafe.Pointer(s.base())
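Both hunks delete flush sites, and under the new scheme they arguably must go rather than merely may go: a flush zeroes the local counter after moving its value into memstats.tinyallocs, so any surviving flush would make a reader that sums the per-cache counters silently lose the flushed portion. A toy demonstration of that hazard (illustrative types only, not runtime code):

package main

import "fmt"

type cache struct{ tinyAllocs uintptr }

// flush models the deleted code path: it moves a cache's count
// into a global and zeroes the local counter.
func flush(global *uint64, c *cache) {
	*global += uint64(c.tinyAllocs)
	c.tinyAllocs = 0
}

func main() {
	var global uint64
	caches := []*cache{{tinyAllocs: 4}, {tinyAllocs: 6}}

	// Under the new scheme, readers sum the locals; a leftover
	// flush site hides part of the total from them.
	flush(&global, caches[0])
	var sum uint64
	for _, c := range caches {
		sum += uint64(c.tinyAllocs)
	}
	fmt.Println(sum) // 6, not 10: the flushed 4 went missing
}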
src/runtime/mstats.go

@@ -550,6 +550,7 @@ func updatememstats() {
 	memstats.total_alloc = 0
 	memstats.nmalloc = 0
 	memstats.nfree = 0
+	memstats.tinyallocs = 0
 	for i := 0; i < len(memstats.by_size); i++ {
 		memstats.by_size[i].nmalloc = 0
 		memstats.by_size[i].nfree = 0
@@ -572,6 +573,9 @@ func updatememstats() {
 		totalFree += uint64(c.local_largefree)
 		memstats.nfree += uint64(c.local_nlargefree)
 
+		// Collect tiny allocation stats.
+		memstats.tinyallocs += uint64(c.local_tinyallocs)
+
 		// Collect per-sizeclass stats.
 		for i := 0; i < _NumSizeClasses; i++ {
 			// Malloc stats.
@@ -644,8 +648,6 @@ func purgecachedstats(c *mcache) {
 	// Protected by heap lock.
 	atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
 	c.local_scan = 0
-	memstats.tinyallocs += uint64(c.local_tinyallocs)
-	c.local_tinyallocs = 0
 }
 
 // Atomically increases a given *system* memory stat. We are counting on this
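With the flush in purgecachedstats gone, updatememstats treats memstats.tinyallocs the way it already treats nmalloc and nfree: zero the global, then recompute it from the caches, leaving the per-cache counters untouched so they remain the source of truth. A sketch of that reset-then-accumulate shape, under the same stop-the-world assumption (illustrative names, not runtime code):

package main

import "fmt"

type cache struct{ tinyAllocs uintptr }

type stats struct{ tinyAllocs uint64 }

// update recomputes the global from scratch on every call: reset,
// then accumulate from the per-P caches, which are not modified.
func (s *stats) update(caches []*cache) {
	s.tinyAllocs = 0
	for _, c := range caches {
		s.tinyAllocs += uint64(c.tinyAllocs)
	}
}

func main() {
	caches := []*cache{{tinyAllocs: 1}, {tinyAllocs: 2}}
	var s stats
	s.update(caches)
	s.update(caches)          // idempotent: recomputed, not accumulated
	fmt.Println(s.tinyAllocs) // 3
}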