diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 76e56828b62..edeb0d679ce 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -585,7 +585,7 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
 	_g_.m.mcache.local_tinyallocs = 0
 
-	s := h.allocSpanLocked(npage)
+	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
 	if s != nil {
 		// Record span info, because gc needs to be
 		// able to map interior pointer to containing span.
@@ -664,9 +664,12 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 	return s
 }
 
-// allocManual allocates a manually-managed span of npage pages and
-// adds the bytes used to *stat, which should be a memstats in-use
-// field. allocManual returns nil if allocation fails.
+// allocManual allocates a manually-managed span of npage pages.
+// allocManual returns nil if allocation fails.
+//
+// allocManual adds the bytes used to *stat, which should be a
+// memstats in-use field. Unlike allocations in the GC'd heap, the
+// allocation does *not* count toward heap_inuse or heap_sys.
 //
 // The memory backing the returned span may not be zeroed if
 // span.needzero is set.
@@ -678,7 +681,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 //go:systemstack
 func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
-	s := h.allocSpanLocked(npage)
+	s := h.allocSpanLocked(npage, stat)
 	if s != nil {
 		s.state = _MSpanManual
 		s.manualFreeList = 0
@@ -687,7 +690,8 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 		s.nelems = 0
 		s.elemsize = 0
 		s.limit = s.base() + s.npages<<_PageShift
-		*stat += uint64(s.npages << _PageShift)
+		// Manually managed memory doesn't count toward heap_sys.
+		memstats.heap_sys -= uint64(s.npages << _PageShift)
 	}
 
 	// This unlock acts as a release barrier. See mheap.alloc_m.
@@ -699,7 +703,7 @@
 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
 // free list, but its state is still MSpanFree.
-func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
+func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	var list *mSpanList
 	var s *mspan
 
@@ -762,7 +766,7 @@ HaveSpan:
 		h.spans[p+n] = s
 	}
 
-	memstats.heap_inuse += uint64(npage << _PageShift)
+	*stat += uint64(npage << _PageShift)
 	memstats.heap_idle -= uint64(npage << _PageShift)
 
 	//println("spanalloc", hex(s.start<<_PageShift))
@@ -903,7 +907,8 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
 	*stat -= uint64(s.npages << _PageShift)
-	h.freeSpanLocked(s, true, true, 0)
+	memstats.heap_sys += uint64(s.npages << _PageShift)
+	h.freeSpanLocked(s, false, true, 0)
 	unlock(&h.lock)
 }
 
@@ -1096,8 +1101,6 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
 	if sumreleased > 0 {
 		print("scvg", k, ": ", sumreleased>>20, " MB released\n")
 	}
-	// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
-	// But we can't call ReadMemStats on g0 holding locks.
print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n") } } diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index 36b5b5077da..c2fa6ad9a92 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -33,13 +33,12 @@ type mstats struct { // Statistics about malloc heap. // Protected by mheap.lock // - // In mstats, heap_sys and heap_inuse includes stack memory, - // while in MemStats stack memory is separated out from the - // heap stats. + // Like MemStats, heap_sys and heap_inuse do not count memory + // in manually-managed spans. heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above) - heap_sys uint64 // virtual address space obtained from system + heap_sys uint64 // virtual address space obtained from system for GC'd heap heap_idle uint64 // bytes in idle spans - heap_inuse uint64 // bytes in non-idle spans + heap_inuse uint64 // bytes in _MSpanInUse spans heap_released uint64 // bytes released to the os heap_objects uint64 // total number of allocated objects @@ -59,7 +58,7 @@ type mstats struct { // Statistics about allocation of low-level fixed-size structures. // Protected by FixAlloc locks. - stacks_inuse uint64 // this number is included in heap_inuse above; differs from MemStats.StackInuse + stacks_inuse uint64 // bytes in manually-managed stack spans stacks_sys uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys mspan_inuse uint64 // mspan structures mspan_sys uint64 @@ -459,10 +458,9 @@ func readmemstats_m(stats *MemStats) { // cannot change MemStats because of backward compatibility. memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats) - // Stack numbers are part of the heap numbers, separate those out for user consumption + // memstats.stacks_sys is only memory mapped directly for OS stacks. + // Add in heap-allocated stack memory for user consumption. stats.StackSys += stats.StackInuse - stats.HeapInuse -= stats.StackInuse - stats.HeapSys -= stats.StackInuse } //go:linkname readGCStats runtime/debug.readGCStats @@ -512,6 +510,9 @@ func updatememstats() { memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys + memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys + // We also count stacks_inuse as sys memory. + memstats.sys += memstats.stacks_inuse + // Calculate memory allocator stats. // During program execution we only count number of frees and amount of freed memory. // Current number of alive object in the heap and amount of alive heap memory