runtime: delineate which memstats are system stats with a type

This change modifies the type of several mstats fields to be a new type:
sysMemStat. The new type has the same in-memory representation (uint64) as
the fields it replaces.

The purpose of this change is to make it very clear which stats may be
used in various functions for accounting (usually the platform-specific
sys* functions, but there are others). Currently there's an implicit
understanding that the *uint64 value passed to these functions is some
kind of statistic whose value is atomically managed. This understanding
isn't inherently problematic, but we're about to change how some stats
(which currently use mSysStatInc and mSysStatDec) work, so we want to
make it very clear what the various requirements are around "sysStat".
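
To illustrate that implicit contract, here is a hypothetical, standalone
sketch (fakeSysAlloc and heapSys are illustrative names only, not runtime
API): the caller hands over a bare *uint64 and simply has to know it will
be updated atomically.

	package main

	import "sync/atomic"

	// fakeSysAlloc stands in for the platform sys* helpers: nothing in the
	// *uint64 parameter's type says "atomic system stat"; callers just know.
	func fakeSysAlloc(n uintptr, sysStat *uint64) []byte {
		atomic.AddUint64(sysStat, uint64(n))
		return make([]byte, n)
	}

	func main() {
		var heapSys uint64
		_ = fakeSysAlloc(1<<12, &heapSys)
		println(atomic.LoadUint64(&heapSys)) // 4096
	}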

This change also removes mSysStatInc and mSysStatDec in favor of a
method on sysMemStat. Note that those two functions were originally
written the way they were because atomic 64-bit adds required a valid G
on ARM, but this hasn't been the case for a very long time (since
golang.org/cl/14204, but even before then it wasn't clear if mutexes
required a valid G anymore). Today we implement 64-bit adds on ARM with
a spinlock table.
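
The replacement is the sysMemStat type added to mstats.go in this CL. As a
rough standalone sketch (using sync/atomic and panic in place of the
runtime's internal atomic package and throw), it behaves like this:

	package main

	import "sync/atomic"

	// sysMemStat models the new type: a uint64 whose type now documents
	// that it must be read and updated atomically.
	type sysMemStat uint64

	// load atomically reads the current value of the stat.
	func (s *sysMemStat) load() uint64 {
		return atomic.LoadUint64((*uint64)(s))
	}

	// add atomically adds n (which may be negative) to the stat.
	func (s *sysMemStat) add(n int64) {
		if s == nil {
			return
		}
		val := atomic.AddUint64((*uint64)(s), uint64(n)) // two's complement handles negative n
		if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
			panic("sysMemStat over/underflow")
		}
	}

	func main() {
		var stacksSys sysMemStat
		stacksSys.add(1 << 12) // account a 4 KiB stack
		stacksSys.add(-(1 << 12))
		println(stacksSys.load()) // 0
	}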

Change-Id: I4e9b37cf14afc2ae20cf736e874eb0064af086d7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246971
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Author: Michael Anthony Knyszek, 2020-07-29 20:25:05 +00:00 (committed by Michael Knyszek)
Commit: 8ebc58452a, parent: dc02578ac8
17 changed files with 109 additions and 130 deletions

@@ -820,7 +820,7 @@ type AddrRanges struct {
 // Add.
 func NewAddrRanges() AddrRanges {
 	r := addrRanges{}
-	r.init(new(uint64))
+	r.init(new(sysMemStat))
 	return AddrRanges{r, true}
 }
@@ -844,7 +844,7 @@ func MakeAddrRanges(a ...AddrRange) AddrRanges {
 	return AddrRanges{addrRanges{
 		ranges:     ranges,
 		totalBytes: total,
-		sysStat:    new(uint64),
+		sysStat:    new(sysMemStat),
 	}, false}
 }

@@ -548,20 +548,20 @@ func dumpmemstats() {
 	dumpint(memstats.nmalloc)
 	dumpint(memstats.nfree)
 	dumpint(memstats.heap_alloc)
-	dumpint(memstats.heap_sys)
+	dumpint(memstats.heap_sys.load())
 	dumpint(memstats.heap_idle)
 	dumpint(memstats.heap_inuse)
 	dumpint(memstats.heap_released)
 	dumpint(memstats.heap_objects)
 	dumpint(memstats.stacks_inuse)
-	dumpint(memstats.stacks_sys)
+	dumpint(memstats.stacks_sys.load())
 	dumpint(memstats.mspan_inuse)
-	dumpint(memstats.mspan_sys)
+	dumpint(memstats.mspan_sys.load())
 	dumpint(memstats.mcache_inuse)
-	dumpint(memstats.mcache_sys)
-	dumpint(memstats.buckhash_sys)
-	dumpint(memstats.gc_sys)
-	dumpint(memstats.other_sys)
+	dumpint(memstats.mcache_sys.load())
+	dumpint(memstats.buckhash_sys.load())
+	dumpint(memstats.gc_sys.load())
+	dumpint(memstats.other_sys.load())
 	dumpint(memstats.next_gc)
 	dumpint(memstats.last_gc_unix)
 	dumpint(memstats.pause_total_ns)

@@ -1313,7 +1313,7 @@ var persistentChunks *notInHeap
 // The returned memory will be zeroed.
 //
 // Consider marking persistentalloc'd types go:notinheap.
-func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	var p *notInHeap
 	systemstack(func() {
 		p = persistentalloc1(size, align, sysStat)
@@ -1324,7 +1324,7 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
 // Must run on system stack because stack growth can (re)invoke it.
 // See issue 9174.
 //go:systemstack
-func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
+func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 	const (
 		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
 	)
@@ -1383,8 +1383,8 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 	}
 	if sysStat != &memstats.other_sys {
-		mSysStatInc(sysStat, size)
-		mSysStatDec(&memstats.other_sys, size)
+		sysStat.add(int64(size))
+		memstats.other_sys.add(-int64(size))
 	}
 	return p
 }
@@ -1425,7 +1425,7 @@ func (l *linearAlloc) init(base, size uintptr) {
 	l.end = base + size
 }
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	p := alignUp(l.next, align)
 	if p+size > l.end {
 		return nil

@@ -11,7 +11,7 @@ import (
 // Don't split the stack as this method may be invoked without a valid G, which
 // prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		if err == _EACCES {
@@ -24,7 +24,7 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 		}
 		return nil
 	}
-	mSysStatInc(sysStat, n)
+	sysStat.add(int64(n))
 	return p
 }
@@ -41,8 +41,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	munmap(v, n)
 }
@@ -59,8 +59,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return p
 }
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 	// AIX does not allow mapping a range that is already mapped.
 	// So, call mprotect to change permissions.

@@ -13,12 +13,12 @@ import (
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
 	}
-	mSysStatInc(sysStat, n)
+	sysStat.add(int64(n))
 	return v
 }
@@ -35,8 +35,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	munmap(v, n)
 }
@@ -65,8 +65,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 const _sunosEAGAIN = 11
 const _ENOMEM = 12
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {

@@ -11,12 +11,12 @@ import (
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
 	}
-	mSysStatInc(sysStat, n)
+	sysStat.add(int64(n))
 	return v
 }
@@ -39,8 +39,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	munmap(v, n)
 }
@@ -58,8 +58,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 const _ENOMEM = 12
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {

@@ -13,7 +13,7 @@ import (
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	p := sysReserve(nil, n)
 	sysMap(p, n, sysStat)
 	return p
@@ -31,8 +31,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 }
 func sysFault(v unsafe.Pointer, n uintptr) {
@@ -80,6 +80,6 @@ func growMemory(pages int32) int32
 // This allows the front-end to replace the old DataView object with a new one.
 func resetMemoryDataView()
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 }

@@ -17,7 +17,7 @@ const (
 // Don't split the stack as this method may be invoked without a valid G, which
 // prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		if err == _EACCES {
@@ -30,7 +30,7 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 		}
 		return nil
 	}
-	mSysStatInc(sysStat, n)
+	sysStat.add(int64(n))
 	return p
 }
@@ -144,8 +144,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	munmap(v, n)
 }
@@ -161,8 +161,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return p
 }
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {

@@ -140,19 +140,19 @@ func sbrk(n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(bl)
 }
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
 	lock(&memlock)
 	p := memAlloc(n)
 	memCheck()
 	unlock(&memlock)
 	if p != nil {
-		mSysStatInc(sysStat, n)
+		sysStat.add(int64(n))
 	}
 	return p
 }
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	lock(&memlock)
 	if uintptr(v)+n == bloc {
 		// Address range being freed is at the end of memory,
@@ -176,10 +176,10 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 func sysHugePage(v unsafe.Pointer, n uintptr) {
 }
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.
-	mSysStatInc(sysStat, n)
+	sysStat.add(int64(n))
 }
 func sysFault(v unsafe.Pointer, n uintptr) {

@@ -24,8 +24,8 @@ const (
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
-	mSysStatInc(sysStat, n)
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+	sysStat.add(int64(n))
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }
@@ -97,8 +97,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) {
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(-int64(n))
 	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
 	if r == 0 {
 		print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
@@ -124,6 +124,6 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
-	mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+	sysStat.add(int64(n))
 }

@@ -32,7 +32,7 @@ type fixalloc struct {
 	chunk  uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers
 	nchunk uint32
 	inuse  uintptr // in-use bytes now
-	stat   *uint64
+	stat   *sysMemStat
 	zero   bool // zero allocations
 }
@@ -49,7 +49,7 @@ type mlink struct {
 // Initialize f to allocate objects of the given size,
 // using the allocator to obtain chunks of memory.
-func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
+func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat) {
 	f.size = size
 	f.first = first
 	f.arg = arg

@@ -100,7 +100,7 @@ const (
 // heapRetained returns an estimate of the current heap RSS.
 func heapRetained() uint64 {
-	return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
+	return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
 }
 // gcPaceScavenger updates the scavenger's pacing, particularly
@@ -711,7 +711,7 @@ func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
 	// Update global accounting only when not in test, otherwise
 	// the runtime's accounting will be wrong.
-	mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
+	atomic.Xadd64(&memstats.heap_released, int64(npages)*pageSize)
 	return addr
 }

@@ -1222,22 +1222,22 @@ HaveSpan:
 		// sysUsed all the pages that are actually available
 		// in the span since some of them might be scavenged.
 		sysUsed(unsafe.Pointer(base), nbytes)
-		atomic.Xadd64(&memstats.heap_released, -int64(scav))
-		mSysStatDec(&memstats.heap_released, scav)
 	}
 	// Update stats.
 	switch typ {
 	case spanAllocHeap:
-		mSysStatInc(&memstats.heap_inuse, nbytes)
+		atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
 	case spanAllocStack:
-		mSysStatInc(&memstats.stacks_inuse, nbytes)
+		atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
 	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		mSysStatInc(&memstats.gc_sys, nbytes)
+		memstats.gc_sys.add(int64(nbytes))
 	}
 	if typ.manual() {
 		// Manually managed memory doesn't count toward heap_sys.
-		mSysStatDec(&memstats.heap_sys, nbytes)
+		memstats.heap_sys.add(-int64(nbytes))
 	}
-	mSysStatDec(&memstats.heap_idle, nbytes)
+	atomic.Xadd64(&memstats.heap_idle, -int64(nbytes))
 	// Publish the span in various locations.
@@ -1314,8 +1314,8 @@ func (h *mheap) grow(npage uintptr) bool {
 		// The allocation is always aligned to the heap arena
 		// size which is always > physPageSize, so its safe to
 		// just add directly to heap_released.
-		mSysStatInc(&memstats.heap_released, asize)
-		mSysStatInc(&memstats.heap_idle, asize)
+		atomic.Xadd64(&memstats.heap_released, int64(asize))
+		atomic.Xadd64(&memstats.heap_idle, int64(asize))
 		// Recalculate nBase.
 		// We know this won't overflow, because sysAlloc returned
@@ -1400,18 +1400,20 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 	// Update stats.
 	//
 	// Mirrors the code in allocSpan.
+	nbytes := s.npages * pageSize
 	switch typ {
 	case spanAllocHeap:
-		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
+		atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
 	case spanAllocStack:
-		mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize)
+		atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
 	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		mSysStatDec(&memstats.gc_sys, s.npages*pageSize)
+		memstats.gc_sys.add(-int64(nbytes))
 	}
 	if typ.manual() {
-		mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
+		// Manually managed memory doesn't count toward heap_sys, so add it back.
+		memstats.heap_sys.add(int64(nbytes))
 	}
-	mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
+	atomic.Xadd64(&memstats.heap_idle, int64(nbytes))
 	// Mark the space as free.
 	h.pages.free(s.base(), s.npages)

@@ -293,13 +293,13 @@ type pageAlloc struct {
 	// sysStat is the runtime memstat to update when new system
 	// memory is committed by the pageAlloc for allocation metadata.
-	sysStat *uint64
+	sysStat *sysMemStat
 	// Whether or not this struct is being used in tests.
 	test bool
 }
-func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
 	if levelLogPages[0] > logMaxPackedValue {
 		// We can't represent 1<<levelLogPages[0] pages, the maximum number
 		// of pages we need to represent at the root level, in a summary, which

@@ -160,10 +160,10 @@ type addrRanges struct {
 	totalBytes uintptr
 	// sysStat is the stat to track allocations by this type
-	sysStat *uint64
+	sysStat *sysMemStat
 }
-func (a *addrRanges) init(sysStat *uint64) {
+func (a *addrRanges) init(sysStat *sysMemStat) {
 	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
 	ranges.len = 0
 	ranges.cap = 16

@@ -8,7 +8,6 @@ package runtime
 import (
 	"runtime/internal/atomic"
-	"runtime/internal/sys"
 	"unsafe"
 )
@@ -35,11 +34,11 @@ type mstats struct {
 	//
 	// Like MemStats, heap_sys and heap_inuse do not count memory
 	// in manually-managed spans.
 	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
-	heap_sys      uint64 // virtual address space obtained from system for GC'd heap
+	heap_sys      sysMemStat // virtual address space obtained from system for GC'd heap
 	heap_idle     uint64 // bytes in idle spans
 	heap_inuse    uint64 // bytes in mSpanInUse spans
 	heap_released uint64 // bytes released to the os
 	// heap_objects is not used by the runtime directly and instead
 	// computed on the fly by updatememstats.
@@ -47,15 +46,15 @@ type mstats struct {
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
 	stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
-	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
+	stacks_sys   sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
 	mspan_inuse  uint64 // mspan structures
-	mspan_sys    uint64
+	mspan_sys    sysMemStat
 	mcache_inuse uint64 // mcache structures
-	mcache_sys   uint64
-	buckhash_sys uint64 // profiling bucket hash table
-	gc_sys       uint64 // updated atomically or during STW
-	other_sys    uint64 // updated atomically or during STW
+	mcache_sys   sysMemStat
+	buckhash_sys sysMemStat // profiling bucket hash table
+	gc_sys       sysMemStat // updated atomically or during STW
+	other_sys    sysMemStat // updated atomically or during STW
 	// Statistics about the garbage collector.
@@ -533,8 +532,9 @@ func updatememstats() {
 	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
 	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
-	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
-		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
+	memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
+		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gc_sys.load() +
+		memstats.other_sys.load()
 	// We also count stacks_inuse as sys memory.
 	memstats.sys += memstats.stacks_inuse
@@ -625,46 +625,24 @@ func flushallmcaches() {
 	}
 }
-// Atomically increases a given *system* memory stat. We are counting on this
-// stat never overflowing a uintptr, so this function must only be used for
-// system memory stats.
-//
-// The current implementation for little endian architectures is based on
-// xadduintptr(), which is less than ideal: xadd64() should really be used.
-// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
-// doesn't use locks. (Locks are a problem as they require a valid G, which
-// restricts their useability.)
-//
-// A side-effect of using xadduintptr() is that we need to check for
-// overflow errors.
-//go:nosplit
-func mSysStatInc(sysStat *uint64, n uintptr) {
-	if sysStat == nil {
-		return
-	}
-	if sys.BigEndian {
-		atomic.Xadd64(sysStat, int64(n))
-		return
-	}
-	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
-		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
-		exit(2)
-	}
-}
-
-// Atomically decreases a given *system* memory stat. Same comments as
-// mSysStatInc apply.
-//go:nosplit
-func mSysStatDec(sysStat *uint64, n uintptr) {
-	if sysStat == nil {
-		return
-	}
-	if sys.BigEndian {
-		atomic.Xadd64(sysStat, -int64(n))
-		return
-	}
-	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
-		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
-		exit(2)
-	}
-}
+// sysMemStat represents a global system statistic that is managed atomically.
+//
+// This type must structurally be a uint64 so that mstats aligns with MemStats.
+type sysMemStat uint64
+
+// load atomically reads the value of the stat.
+func (s *sysMemStat) load() uint64 {
+	return atomic.Load64((*uint64)(s))
+}
+
+// add atomically adds the sysMemStat by n.
+func (s *sysMemStat) add(n int64) {
+	if s == nil {
+		return
+	}
+	val := atomic.Xadd64((*uint64)(s), n)
+	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
+		print("runtime: val=", val, " n=", n, "\n")
+		throw("sysMemStat overflow")
+	}
+}

@@ -198,7 +198,6 @@ func newosproc(mp *m) {
 		exit(1)
 	}
 	mp.g0.stack.hi = stacksize // for mstart
-	//mSysStatInc(&memstats.stacks_sys, stacksize) //TODO: do this?
 	// Tell the pthread library we won't join with this thread.
 	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
@@ -247,7 +246,7 @@ func newosproc0(stacksize uintptr, fn uintptr) {
 		exit(1)
 	}
 	g0.stack.hi = stacksize // for mstart
-	mSysStatInc(&memstats.stacks_sys, stacksize)
+	memstats.stacks_sys.add(int64(stacksize))
 	// Tell the pthread library we won't join with this thread.
 	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {