// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var (
	// profInsertLock protects changes to the start of all *bucket linked lists
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct
	profMemFutureLock [len(memRecord{}.future)]mutex
)

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// maxStack is the max depth of stack to record in bucket.
	// Note that it's only used internally as a guard against
	// wildly out-of-bounds slicing of the PCs that come after
	// a bucket struct, and it could increase in the future.
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
//
// None of the fields in this bucket header are modified after
// creation, including its next and allnext links.
//
// No heap pointers.
type bucket struct {
	_       sys.NotInHeap
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we
	// naively counted them, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination. Allocations count toward the next mark
	// termination's snapshot, while sweep frees count toward the
	// previous mark termination's snapshot:
	//
	//              MT          MT          MT          MT
	//             .·|         .·|         .·|         .·|
	//          .·˙  |      .·˙  |      .·˙  |      .·˙  |
	//       .·˙     |   .·˙     |   .·˙     |   .·˙     |
	//    .·˙        |.·˙        |.·˙        |.·˙        |
	//
	//       alloc → ▲ ← free
	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
	//       C+2            →    C+1    →  C
	//
	//                   alloc → ▲ ← free
	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
	//                     C+2          →   C+1   →  C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle are guaranteed to be done.
	// Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it's complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}

// memRecordCycle holds allocation and free counts, used both for a single
// heap profile cycle and for the accumulated active profile.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64
	cycles int64
}

var (
	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash atomic.UnsafePointer // *buckhashArray

	mProfCycle mProfCycleHolder
)
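
// Note: the bucket list heads and buckhash above are atomic pointers. In the
// code below they are only ever updated while holding profInsertLock (see
// stkbucket), while readers such as the optimistic lookup in stkbucket and the
// walk in mProf_FlushLocked load them without taking that lock.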

type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
// indicate whether future[cycle] in all buckets has been queued to flush into
// the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}

// read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32) {
	v := c.value.Load()
	cycle = v >> 1
	return cycle
}

// setFlushed sets the flushed flag. It returns the current cycle count and the
// previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}

// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// We explicitly wrap mProfCycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}
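
// For illustration: value packs the cycle count and the flushed flag as
// value = cycle<<1 | flushedBit. For example, once increment has advanced
// the count to 5, value holds 0b1010; a later setFlushed call leaves it at
// 0b1011 and reports (cycle 5, alreadyFlushed == false).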

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}
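
// The allocation above gives each bucket the flat layout described in the
// bucket comment:
//
//	[bucket header][nstk stack PCs][memRecord or blockRecord]
//
// stk, mp, and bp below recover each piece by offsetting from the start of
// the bucket.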

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the active profile
// (profMemActiveLock) and for the profiling cycle at index
// (profMemFutureLock[index]).
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])

	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}
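
// Taken together, one GC cycle drives the heap profile roughly like this
// (a summary of the function comments above, not an additional API):
//
//	mProf_NextCycle()  // during mark termination (STW): open the next cycle
//	mProf_Flush()      // after the world restarts: publish the completed cycle
//	mProf_PostSweep()  // once sweep frees are done: publish them, cycle unchanged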

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])

	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&profMemFutureLock[index])

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
	// the profiler locks. This reduces potential contention and chances of
	// deadlocks. Since the object must be alive during the call to
	// mProf_Malloc, it's fine to do this non-atomically.
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalarg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most system an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 14:54:31 -05:00
|
|
|
systemstack(func() {
|
2014-11-11 17:05:02 -05:00
|
|
|
setprofilebucket(p, b)
|
|
|
|
|
})
|
2014-09-04 00:54:06 -04:00
|
|
|
}
|
2014-09-01 18:51:12 -04:00
|
|
|
|
|
|
|
|
// Called when freeing a profiled block.
|
2015-11-03 20:00:21 +01:00
|
|
|
func mProf_Free(b *bucket, size uintptr) {
|
runtime: split mprof locks
The profiles for memory allocations, sync.Mutex contention, and general
blocking store their data in a shared hash table. The bookkeeping work
at the end of a garbage collection cycle involves maintenance on each
memory allocation record. Previously, a single lock guarded access to
the hash table and the contents of all records. When a program has
allocated memory at a large number of unique call stacks, the
maintenance following every garbage collection can hold that lock for
several milliseconds. That can prevent progress on all other goroutines
by delaying acquirep's call to mcache.prepareForSweep, which needs the
lock in mProf_Free to report when a profiled allocation is no longer in
use. With no user goroutines making progress, it is in effect a
multi-millisecond GC-related stop-the-world pause.
Split the lock so the call to mProf_Flush no longer delays each P's call
to mProf_Free: mProf_Free uses a lock on the memory records' N+1 cycle,
and mProf_Flush uses locks on the memory records' accumulator and their
N cycle. mProf_Malloc also no longer competes with mProf_Flush, as it
uses a lock on the memory records' N+2 cycle. The profiles for
sync.Mutex contention and general blocking now share a separate lock,
and another lock guards insertions to the shared hash table (uncommon in
the steady-state). Consumers of each type of profile take the matching
accumulator lock, so will observe consistent count and magnitude values
for each record.
For #45894
Change-Id: I615ff80618d10e71025423daa64b0b7f9dc57daa
Reviewed-on: https://go-review.googlesource.com/c/go/+/399956
Reviewed-by: Carlos Amedee <carlos@golang.org>
Run-TryBot: Rhys Hiltner <rhys@justin.tv>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
2022-04-01 12:56:49 -07:00
|
|
|
index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
|
|
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
mp := b.mp()
|
runtime: split mprof locks
The profiles for memory allocations, sync.Mutex contention, and general
blocking store their data in a shared hash table. The bookkeeping work
at the end of a garbage collection cycle involves maintenance on each
memory allocation record. Previously, a single lock guarded access to
the hash table and the contents of all records. When a program has
allocated memory at a large number of unique call stacks, the
maintenance following every garbage collection can hold that lock for
several milliseconds. That can prevent progress on all other goroutines
by delaying acquirep's call to mcache.prepareForSweep, which needs the
lock in mProf_Free to report when a profiled allocation is no longer in
use. With no user goroutines making progress, it is in effect a
multi-millisecond GC-related stop-the-world pause.
Split the lock so the call to mProf_Flush no longer delays each P's call
to mProf_Free: mProf_Free uses a lock on the memory records' N+1 cycle,
and mProf_Flush uses locks on the memory records' accumulator and their
N cycle. mProf_Malloc also no longer competes with mProf_Flush, as it
uses a lock on the memory records' N+2 cycle. The profiles for
sync.Mutex contention and general blocking now share a separate lock,
and another lock guards insertions to the shared hash table (uncommon in
the steady-state). Consumers of each type of profile take the matching
accumulator lock, so will observe consistent count and magnitude values
for each record.
For #45894
Change-Id: I615ff80618d10e71025423daa64b0b7f9dc57daa
Reviewed-on: https://go-review.googlesource.com/c/go/+/399956
Reviewed-by: Carlos Amedee <carlos@golang.org>
Run-TryBot: Rhys Hiltner <rhys@justin.tv>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
2022-04-01 12:56:49 -07:00
|
|
|
mpc := &mp.future[index]
|
|
|
|
|
|
|
|
|
|
lock(&profMemFutureLock[index])
|
2017-03-01 13:58:22 -05:00
|
|
|
mpc.frees++
|
|
|
|
|
mpc.free_bytes += size
|
runtime: split mprof locks
The profiles for memory allocations, sync.Mutex contention, and general
blocking store their data in a shared hash table. The bookkeeping work
at the end of a garbage collection cycle involves maintenance on each
memory allocation record. Previously, a single lock guarded access to
the hash table and the contents of all records. When a program has
allocated memory at a large number of unique call stacks, the
maintenance following every garbage collection can hold that lock for
several milliseconds. That can prevent progress on all other goroutines
by delaying acquirep's call to mcache.prepareForSweep, which needs the
lock in mProf_Free to report when a profiled allocation is no longer in
use. With no user goroutines making progress, it is in effect a
multi-millisecond GC-related stop-the-world pause.
Split the lock so the call to mProf_Flush no longer delays each P's call
to mProf_Free: mProf_Free uses a lock on the memory records' N+1 cycle,
and mProf_Flush uses locks on the memory records' accumulator and their
N cycle. mProf_Malloc also no longer competes with mProf_Flush, as it
uses a lock on the memory records' N+2 cycle. The profiles for
sync.Mutex contention and general blocking now share a separate lock,
and another lock guards insertions to the shared hash table (uncommon in
the steady-state). Consumers of each type of profile take the matching
accumulator lock, so will observe consistent count and magnitude values
for each record.
For #45894
Change-Id: I615ff80618d10e71025423daa64b0b7f9dc57daa
Reviewed-on: https://go-review.googlesource.com/c/go/+/399956
Reviewed-by: Carlos Amedee <carlos@golang.org>
Run-TryBot: Rhys Hiltner <rhys@justin.tv>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
2022-04-01 12:56:49 -07:00
|
|
|
unlock(&profMemFutureLock[index])
|
2014-09-01 00:06:26 -04:00
|
|
|
}
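
// Note the offsets used above: an allocation is recorded in the cycle two
// steps ahead of the current profile cycle, while a free is recorded in the
// next cycle. As the MemProfile documentation below explains, this delay is
// what keeps the published profile from being skewed toward allocations that
// have not yet had a chance to be freed by the garbage collector.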

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
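
// A minimal usage sketch (user code, not part of this file; assumes the
// standard runtime and runtime/pprof packages):
//
//	package main
//
//	import (
//		"os"
//		"runtime"
//		"runtime/pprof"
//	)
//
//	func main() {
//		// Sample, on average, one blocking event per 10µs spent blocked.
//		runtime.SetBlockProfileRate(10000)
//		defer pprof.Lookup("block").WriteTo(os.Stdout, 0)
//		// ... run the workload being profiled ...
//	}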

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}

// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}
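
// For example (illustrative numbers only): with rate = 1e6 ticks, an event
// that blocked for 250000 ticks passes the check above with probability
// 250000/1000000 = 25%, while any event of at least 1e6 ticks is always kept.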

func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	b := stkbucket(which, 0, stk[:nstk], true)
	bp := b.bp()

	lock(&profBlockLock)
	// We want to up-scale the count and cycles according to the
	// probability that the event was sampled. For block profile events,
	// the sample probability is 1 if cycles >= rate, and cycles / rate
	// otherwise. For mutex profile events, the sample probability is 1 / rate.
	// We scale the events by 1 / (probability the event was sampled).
	if which == blockProfile && cycles < rate {
		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else if which == mutexProfile {
		bp.count += float64(rate)
		bp.cycles += rate * cycles
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}
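
// Worked example of the scaling above (illustrative numbers): with rate = 1e6
// and a sampled block event of cycles = 250000, the sample probability was
// 0.25, so the record is credited with count += 1e6/250000 = 4 and
// cycles += 1e6, the expected totals for the four such events this sample
// stands in for.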

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For rate > 1, the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
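
// A usage sketch (user code, not part of this file; assumes runtime and
// runtime/pprof are imported):
//
//	// Report roughly 1 in 5 mutex contention events.
//	prev := runtime.SetMutexProfileFraction(5)
//	defer runtime.SetMutexProfileFraction(prev)
//	defer pprof.Lookup("mutex").WriteTo(os.Stdout, 0)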

//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, rate, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}
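
// The returned slice can be symbolized like any other PC slice, for example
// (user-code sketch; rec stands for a StackRecord obtained from a profile API
// and fmt is assumed to be imported):
//
//	frames := runtime.CallersFrames(rec.Stack())
//	for {
//		frame, more := frames.Next()
//		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
//		if !more {
//			break
//		}
//	}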

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
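
// For example (user-code sketch): a debugging build that wants to see every
// allocation site can raise sampling to 100% before doing anything else:
//
//	func main() {
//		runtime.MemProfileRate = 1 // expensive; records every allocation
//		// ... rest of the program ...
//	}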

// disableMemoryProfiling is set by the linker if runtime.MemProfile
// is not used and the link type guarantees nobody else could use it
// elsewhere.
var disableMemoryProfiling bool

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}
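
// A usage sketch of the n, ok contract documented above (user code, hedged;
// the +50 headroom is an arbitrary illustrative choice):
//
//	var records []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		// Allocate a little extra in case more sites appear between calls.
//		records = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(records, true)
//	}
//	records = records[:n]
//	var inUse int64
//	for i := range records {
//		inUse += records[i].InUseBytes()
//	}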

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}

// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			// Prevent callers from having to worry about division by zero errors.
			// See discussion on http://golang.org/cl/299991.
			if r.Count == 0 {
				r.Count = 1
			}
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			if asanenabled {
				asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
|
2022-04-01 12:56:49 -07:00
|
|
|
unlock(&profBlockLock)
|
2014-08-21 08:07:42 +02:00
|
|
|
return
|
|
|
|
|
}
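A minimal usage sketch of the API above (not part of this file): callers normally size the slice by calling BlockProfile first, then retry with some headroom, since the profile can grow between the two calls. The rate passed to SetBlockProfileRate below is illustrative; block events are only recorded once a rate has been set.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Record every blocking event (a rate of 1 is illustrative; production
	// code usually samples more coarsely).
	runtime.SetBlockProfileRate(1)

	// Ask for the record count first, then retry with a large enough slice.
	var records []runtime.BlockProfileRecord
	n, ok := runtime.BlockProfile(nil)
	for !ok {
		records = make([]runtime.BlockProfileRecord, n+10) // headroom in case the profile grew
		n, ok = runtime.BlockProfile(records)
	}
	records = records[:n]

	for _, r := range records {
		fmt.Println(r.Count, r.Cycles, r.Stack())
	}
}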
|
|
|
|
|
|
2016-09-22 09:48:30 -04:00
|
|
|
// MutexProfile returns n, the number of records in the current mutex profile.
|
|
|
|
|
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
|
|
|
|
|
// Otherwise, MutexProfile does not change p and returns n, false.
|
|
|
|
|
//
|
|
|
|
|
// Most clients should use the runtime/pprof package
|
|
|
|
|
// instead of calling MutexProfile directly.
|
|
|
|
|
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
|
2022-04-01 12:56:49 -07:00
|
|
|
lock(&profBlockLock)
|
|
|
|
|
head := (*bucket)(xbuckets.Load())
|
|
|
|
|
for b := head; b != nil; b = b.allnext {
|
2016-09-22 09:48:30 -04:00
|
|
|
n++
|
|
|
|
|
}
|
|
|
|
|
if n <= len(p) {
|
|
|
|
|
ok = true
|
2022-04-01 12:56:49 -07:00
|
|
|
for b := head; b != nil; b = b.allnext {
|
2016-09-22 09:48:30 -04:00
|
|
|
bp := b.bp()
|
|
|
|
|
r := &p[0]
|
|
|
|
|
r.Count = int64(bp.count)
|
|
|
|
|
r.Cycles = bp.cycles
|
|
|
|
|
i := copy(r.Stack0[:], b.stk())
|
|
|
|
|
for ; i < len(r.Stack0); i++ {
|
|
|
|
|
r.Stack0[i] = 0
|
|
|
|
|
}
|
|
|
|
|
p = p[1:]
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-04-01 12:56:49 -07:00
|
|
|
unlock(&profBlockLock)
|
2016-09-22 09:48:30 -04:00
|
|
|
return
|
|
|
|
|
}
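The same grow-and-retry idiom applies here; a sketch assuming mutex contention sampling has been enabled via SetMutexProfileFraction (the fraction used below is illustrative).

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Sample roughly 1 in 5 mutex contention events (illustrative rate).
	runtime.SetMutexProfileFraction(5)

	var records []runtime.BlockProfileRecord
	n, ok := runtime.MutexProfile(nil)
	for !ok {
		records = make([]runtime.BlockProfileRecord, n+10)
		n, ok = runtime.MutexProfile(records)
	}
	for _, r := range records[:n] {
		fmt.Println(r.Count, r.Cycles, r.Stack())
	}
}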
|
|
|
|
|
|
2014-09-01 00:06:26 -04:00
|
|
|
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
|
|
|
|
|
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
|
|
|
|
|
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
|
|
|
|
|
//
|
|
|
|
|
// Most clients should use the runtime/pprof package instead
|
|
|
|
|
// of calling ThreadCreateProfile directly.
|
|
|
|
|
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
|
2015-11-02 14:09:24 -05:00
|
|
|
first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
|
2014-09-01 00:06:26 -04:00
|
|
|
for mp := first; mp != nil; mp = mp.alllink {
|
|
|
|
|
n++
|
|
|
|
|
}
|
|
|
|
|
if n <= len(p) {
|
|
|
|
|
ok = true
|
|
|
|
|
i := 0
|
|
|
|
|
for mp := first; mp != nil; mp = mp.alllink {
|
2016-02-22 11:27:32 -08:00
|
|
|
p[i].Stack0 = mp.createstack
|
2014-09-01 00:06:26 -04:00
|
|
|
i++
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
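A usage sketch for the thread creation profile; the headroom added to the slice is arbitrary and only guards against threads being created between the two calls.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var records []runtime.StackRecord
	n, ok := runtime.ThreadCreateProfile(nil)
	for !ok {
		records = make([]runtime.StackRecord, n+8)
		n, ok = runtime.ThreadCreateProfile(records)
	}
	fmt.Println("threads created:", n)
	for _, r := range records[:n] {
		fmt.Println(r.Stack())
	}
}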
|
|
|
|
|
|
2019-08-04 15:14:48 -04:00
|
|
|
//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
|
|
|
|
|
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
|
|
|
|
return goroutineProfileWithLabels(p, labels)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// labels may be nil. If labels is non-nil, it must have the same length as p.
|
|
|
|
|
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
|
|
|
|
if labels != nil && len(labels) != len(p) {
|
|
|
|
|
labels = nil
|
|
|
|
|
}
|
2022-02-18 10:56:16 -08:00
|
|
|
|
2023-01-26 14:49:03 -08:00
|
|
|
return goroutineProfileWithLabelsConcurrent(p, labels)
|
2022-02-18 10:56:16 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var goroutineProfile = struct {
|
|
|
|
|
sema uint32
|
|
|
|
|
active bool
|
|
|
|
|
offset atomic.Int64
|
|
|
|
|
records []StackRecord
|
|
|
|
|
labels []unsafe.Pointer
|
|
|
|
|
}{
|
|
|
|
|
sema: 1,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// goroutineProfileState indicates the status of a goroutine's stack for the
|
|
|
|
|
// current in-progress goroutine profile. Goroutines' stacks are initially
|
|
|
|
|
// "Absent" from the profile, and end up "Satisfied" by the time the profile is
|
|
|
|
|
// complete. While a goroutine's stack is being captured, its
|
|
|
|
|
// goroutineProfileState will be "InProgress" and it will not be able to run
|
|
|
|
|
// until the capture completes and the state moves to "Satisfied".
|
|
|
|
|
//
|
|
|
|
|
// Some goroutines (the finalizer goroutine, which at various times can be
|
|
|
|
|
// either a "system" or a "user" goroutine, and the goroutine that is
|
|
|
|
|
// coordinating the profile, any goroutines created during the profile) move
|
|
|
|
|
// directly to the "Satisfied" state.
|
|
|
|
|
type goroutineProfileState uint32
|
|
|
|
|
|
|
|
|
|
const (
|
|
|
|
|
goroutineProfileAbsent goroutineProfileState = iota
|
|
|
|
|
goroutineProfileInProgress
|
|
|
|
|
goroutineProfileSatisfied
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
type goroutineProfileStateHolder atomic.Uint32
|
|
|
|
|
|
|
|
|
|
func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
|
|
|
|
|
return goroutineProfileState((*atomic.Uint32)(p).Load())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
|
|
|
|
|
(*atomic.Uint32)(p).Store(uint32(value))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
|
|
|
|
|
return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
|
|
|
|
|
}
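The three methods above are a typed view over a single atomic word. A standalone sketch of the same idiom using sync/atomic (all names below are illustrative, not runtime APIs): several goroutines race to move a record from Absent to InProgress, the single CAS winner performs the capture, and everyone else either sees Satisfied or waits.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type captureState uint32

const (
	captureAbsent captureState = iota
	captureInProgress
	captureSatisfied
)

// captureStateHolder mirrors the idea of goroutineProfileStateHolder: a typed
// wrapper over an atomic uint32 so state transitions stay type-checked.
type captureStateHolder struct{ v atomic.Uint32 }

func (h *captureStateHolder) Load() captureState  { return captureState(h.v.Load()) }
func (h *captureStateHolder) Store(s captureState) { h.v.Store(uint32(s)) }
func (h *captureStateHolder) CompareAndSwap(old, new captureState) bool {
	return h.v.CompareAndSwap(uint32(old), uint32(new))
}

func main() {
	var state captureStateHolder
	var wins atomic.Int32
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Only the goroutine that wins the CAS performs the capture.
			if state.CompareAndSwap(captureAbsent, captureInProgress) {
				wins.Add(1) // stand-in for doRecordGoroutineProfile
				state.Store(captureSatisfied)
			}
		}()
	}
	wg.Wait()
	fmt.Println("captures performed:", wins.Load()) // always 1
}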
|
|
|
|
|
|
|
|
|
|
func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
|
|
|
|
semacquire(&goroutineProfile.sema)
|
|
|
|
|
|
|
|
|
|
ourg := getg()
|
|
|
|
|
|
runtime: emit STW events for all pauses, not just those for the GC
Currently STW events are only emitted for GC STWs. There's little reason
why the trace can't contain events for every STW: they're rare so don't
take up much space in the trace, yet being able to see when the world
was stopped is often critical to debugging certain latency issues,
especially when they stem from user-level APIs.
This change adds new "kinds" to the EvGCSTWStart event, renames the
GCSTW events to just "STW," and lets the parser deal with unknown STW
kinds for future backwards compatibility.
But, this change must break trace compatibility, so it bumps the trace
version to Go 1.21.
This change also includes a small cleanup in the trace command, which
previously checked for STW events when deciding whether user tasks
overlapped with a GC. Looking at the source, I don't see a way for STW
events to ever enter the stream that that code looks at, so that
condition has been deleted.
Change-Id: I9a5dc144092c53e92eb6950e9a5504a790ac00cf
Reviewed-on: https://go-review.googlesource.com/c/go/+/494495
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
2023-05-11 21:09:10 +00:00
|
|
|
stopTheWorld(stwGoroutineProfile)
|
2022-02-18 10:56:16 -08:00
|
|
|
// Using gcount while the world is stopped should give us a consistent view
|
|
|
|
|
// of the number of live goroutines, minus the number of goroutines that are
|
|
|
|
|
// alive and permanently marked as "system". But to make this count agree
|
|
|
|
|
// with what we'd get from isSystemGoroutine, we need special handling for
|
|
|
|
|
// goroutines that can vary between user and system to ensure that the count
|
|
|
|
|
// doesn't change during the collection. So, check the finalizer goroutine
|
|
|
|
|
// in particular.
|
|
|
|
|
n = int(gcount())
|
2022-04-13 21:14:22 +08:00
|
|
|
if fingStatus.Load()&fingRunningFinalizer != 0 {
|
2022-02-18 10:56:16 -08:00
|
|
|
n++
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if n > len(p) {
|
|
|
|
|
// There's not enough space in p to store the whole profile, so (per the
|
|
|
|
|
// contract of runtime.GoroutineProfile) we're not allowed to write to p
|
|
|
|
|
// at all and must return n, false.
|
|
|
|
|
startTheWorld()
|
|
|
|
|
semrelease(&goroutineProfile.sema)
|
|
|
|
|
return n, false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Save current goroutine.
|
|
|
|
|
sp := getcallersp()
|
|
|
|
|
pc := getcallerpc()
|
|
|
|
|
systemstack(func() {
|
|
|
|
|
saveg(pc, sp, ourg, &p[0])
|
|
|
|
|
})
|
|
|
|
|
ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
|
|
|
|
|
goroutineProfile.offset.Store(1)
|
|
|
|
|
|
|
|
|
|
// Prepare for all other goroutines to enter the profile. Aside from ourg,
|
|
|
|
|
// every goroutine struct in the allgs list has its goroutineProfiled field
|
|
|
|
|
// cleared. Any goroutine created from this point on (while
|
|
|
|
|
// goroutineProfile.active is set) will start with its goroutineProfiled
|
|
|
|
|
// field set to goroutineProfileSatisfied.
|
|
|
|
|
goroutineProfile.active = true
|
|
|
|
|
goroutineProfile.records = p
|
|
|
|
|
goroutineProfile.labels = labels
|
2022-07-16 14:31:14 +00:00
|
|
|
// The finalizer goroutine needs special handling because it can vary over
|
2022-02-18 10:56:16 -08:00
|
|
|
// time between being a user goroutine (eligible for this profile) and a
|
|
|
|
|
// system goroutine (to be excluded). Pick one before restarting the world.
|
|
|
|
|
if fing != nil {
|
|
|
|
|
fing.goroutineProfiled.Store(goroutineProfileSatisfied)
|
2022-05-10 15:09:12 +00:00
|
|
|
if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
|
|
|
|
|
doRecordGoroutineProfile(fing)
|
|
|
|
|
}
|
2022-02-18 10:56:16 -08:00
|
|
|
}
|
|
|
|
|
startTheWorld()
|
|
|
|
|
|
|
|
|
|
// Visit each goroutine that existed as of the startTheWorld call above.
|
|
|
|
|
//
|
|
|
|
|
// New goroutines may not be in this list, but we didn't want to know about
|
|
|
|
|
// them anyway. If they do appear in this list (via reusing a dead goroutine
|
|
|
|
|
// struct, or racing to launch between the world restarting and us getting
|
2022-05-17 21:25:43 +00:00
|
|
|
// the list), they will already have their goroutineProfiled field set to
|
2022-02-18 10:56:16 -08:00
|
|
|
// goroutineProfileSatisfied before their state transitions out of _Gdead.
|
|
|
|
|
//
|
|
|
|
|
// Any goroutine that the scheduler tries to execute concurrently with this
|
|
|
|
|
// call will start by adding itself to the profile (before the act of
|
|
|
|
|
// executing can cause any changes in its stack).
|
|
|
|
|
forEachGRace(func(gp1 *g) {
|
|
|
|
|
tryRecordGoroutineProfile(gp1, Gosched)
|
|
|
|
|
})
|
|
|
|
|
|
2023-05-11 21:09:10 +00:00
|
|
|
stopTheWorld(stwGoroutineProfileCleanup)
|
2022-02-18 10:56:16 -08:00
|
|
|
endOffset := goroutineProfile.offset.Swap(0)
|
|
|
|
|
goroutineProfile.active = false
|
|
|
|
|
goroutineProfile.records = nil
|
|
|
|
|
goroutineProfile.labels = nil
|
|
|
|
|
startTheWorld()
|
|
|
|
|
|
|
|
|
|
// Restore the invariant that every goroutine struct in allgs has its
|
|
|
|
|
// goroutineProfiled field cleared.
|
|
|
|
|
forEachGRace(func(gp1 *g) {
|
|
|
|
|
gp1.goroutineProfiled.Store(goroutineProfileAbsent)
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
if raceenabled {
|
|
|
|
|
raceacquire(unsafe.Pointer(&labelSync))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if n != int(endOffset) {
|
|
|
|
|
// It's a big surprise that the number of goroutines changed while we
|
|
|
|
|
// were collecting the profile. But probably better to return a
|
|
|
|
|
// truncated profile than to crash the whole process.
|
|
|
|
|
//
|
|
|
|
|
// For instance, needm moves a goroutine out of the _Gdead state and so
|
|
|
|
|
// might be able to change the goroutine count without interacting with
|
|
|
|
|
// the scheduler. For code like that, the race windows are small and the
|
|
|
|
|
// combination of features is uncommon, so it's hard to be (and remain)
|
|
|
|
|
// sure we've caught them all.
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
semrelease(&goroutineProfile.sema)
|
|
|
|
|
return n, true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
|
|
|
|
|
// tryRecordGoroutineProfile.
|
|
|
|
|
//
|
|
|
|
|
//go:yeswritebarrierrec
|
|
|
|
|
func tryRecordGoroutineProfileWB(gp1 *g) {
|
|
|
|
|
if getg().m.p.ptr() == nil {
|
|
|
|
|
throw("no P available, write barriers are forbidden")
|
|
|
|
|
}
|
|
|
|
|
tryRecordGoroutineProfile(gp1, osyield)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
|
|
|
|
|
// in the current goroutine profile: either that it should not be profiled, or
|
|
|
|
|
// that a snapshot of its call stack and labels are now in the profile.
|
|
|
|
|
func tryRecordGoroutineProfile(gp1 *g, yield func()) {
|
|
|
|
|
if readgstatus(gp1) == _Gdead {
|
|
|
|
|
// Dead goroutines should not appear in the profile. Goroutines that
|
|
|
|
|
// start while profile collection is active will get goroutineProfiled
|
|
|
|
|
// set to goroutineProfileSatisfied before transitioning out of _Gdead,
|
|
|
|
|
// so here we check _Gdead first.
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
if isSystemGoroutine(gp1, true) {
|
|
|
|
|
// System goroutines should not appear in the profile. (The finalizer
|
|
|
|
|
// goroutine is marked as "already profiled".)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for {
|
|
|
|
|
prev := gp1.goroutineProfiled.Load()
|
|
|
|
|
if prev == goroutineProfileSatisfied {
|
|
|
|
|
// This goroutine is already in the profile (or is new since the
|
|
|
|
|
// start of collection, so shouldn't appear in the profile).
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
if prev == goroutineProfileInProgress {
|
|
|
|
|
// Something else is adding gp1 to the goroutine profile right now.
|
|
|
|
|
// Give that a moment to finish.
|
|
|
|
|
yield()
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// While we have gp1.goroutineProfiled set to
|
|
|
|
|
// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
|
|
|
|
|
// actually be able to run. Disable preemption for ourselves, to make
|
|
|
|
|
// sure we finish profiling gp1 right away instead of leaving it stuck
|
|
|
|
|
// in this limbo.
|
|
|
|
|
mp := acquirem()
|
|
|
|
|
if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
|
|
|
|
|
doRecordGoroutineProfile(gp1)
|
|
|
|
|
gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
|
|
|
|
|
}
|
|
|
|
|
releasem(mp)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
|
|
|
|
|
// goroutine profile. Preemption is disabled.
|
|
|
|
|
//
|
|
|
|
|
// This may be called via tryRecordGoroutineProfile in two ways: by the
|
|
|
|
|
// goroutine that is coordinating the goroutine profile (running on its own
|
|
|
|
|
// stack), or from the scheduler in preparation to execute gp1 (running on the
|
|
|
|
|
// system stack).
|
|
|
|
|
func doRecordGoroutineProfile(gp1 *g) {
|
|
|
|
|
if readgstatus(gp1) == _Grunning {
|
|
|
|
|
print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
|
|
|
|
|
throw("cannot read stack of running goroutine")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
offset := int(goroutineProfile.offset.Add(1)) - 1
|
|
|
|
|
|
|
|
|
|
if offset >= len(goroutineProfile.records) {
|
|
|
|
|
// Should be impossible, but better to return a truncated profile than
|
|
|
|
|
// to crash the entire process at this point. Instead, deal with it in
|
|
|
|
|
// goroutineProfileWithLabelsConcurrent where we have more context.
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// saveg calls gentraceback, which may call cgo traceback functions. When
|
|
|
|
|
// called from the scheduler, this is on the system stack already so
|
|
|
|
|
// traceback.go:cgoContextPCs will avoid calling back into the scheduler.
|
|
|
|
|
//
|
|
|
|
|
// When called from the goroutine coordinating the profile, we have already
|
|
|
|
|
// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
|
|
|
|
|
// preventing it from being truly _Grunnable. So we'll use the system stack
|
|
|
|
|
// to avoid schedule delays.
|
|
|
|
|
systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
|
|
|
|
|
|
|
|
|
|
if goroutineProfile.labels != nil {
|
|
|
|
|
goroutineProfile.labels[offset] = gp1.labels
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
|
2016-01-26 22:58:59 -05:00
|
|
|
gp := getg()
|
|
|
|
|
|
|
|
|
|
isOK := func(gp1 *g) bool {
|
|
|
|
|
// Checking isSystemGoroutine here makes GoroutineProfile
|
|
|
|
|
// consistent with both NumGoroutine and Stack.
|
2018-08-13 16:08:03 -04:00
|
|
|
return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
|
2016-01-26 22:58:59 -05:00
|
|
|
}
|
|
|
|
|
|
2023-05-11 21:09:10 +00:00
|
|
|
stopTheWorld(stwGoroutineProfile)
|
2016-01-26 22:58:59 -05:00
|
|
|
|
2020-12-23 15:05:37 -05:00
|
|
|
// World is stopped, no locking required.
|
2016-01-26 22:58:59 -05:00
|
|
|
n = 1
|
2020-12-23 15:05:37 -05:00
|
|
|
forEachGRace(func(gp1 *g) {
|
2016-01-26 22:58:59 -05:00
|
|
|
if isOK(gp1) {
|
|
|
|
|
n++
|
|
|
|
|
}
|
2020-12-23 15:05:37 -05:00
|
|
|
})
|
2014-09-01 00:06:26 -04:00
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
if n <= len(p) {
|
2016-01-26 22:58:59 -05:00
|
|
|
ok = true
|
2019-08-04 15:14:48 -04:00
|
|
|
r, lbl := p, labels
|
2016-01-26 22:58:59 -05:00
|
|
|
|
|
|
|
|
// Save current goroutine.
|
2018-04-26 14:06:08 -04:00
|
|
|
sp := getcallersp()
|
2017-09-22 15:16:26 -04:00
|
|
|
pc := getcallerpc()
|
2016-01-26 22:58:59 -05:00
|
|
|
systemstack(func() {
|
|
|
|
|
saveg(pc, sp, gp, &r[0])
|
|
|
|
|
})
|
|
|
|
|
r = r[1:]
|
|
|
|
|
|
2019-08-04 15:14:48 -04:00
|
|
|
// If we have a place to put our goroutine labelmap, insert it there.
|
|
|
|
|
if labels != nil {
|
|
|
|
|
lbl[0] = gp.labels
|
|
|
|
|
lbl = lbl[1:]
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-26 22:58:59 -05:00
|
|
|
// Save other goroutines.
|
2020-12-23 15:05:37 -05:00
|
|
|
forEachGRace(func(gp1 *g) {
|
|
|
|
|
if !isOK(gp1) {
|
|
|
|
|
return
|
2014-09-01 00:06:26 -04:00
|
|
|
}
|
2020-12-23 15:05:37 -05:00
|
|
|
|
|
|
|
|
if len(r) == 0 {
|
|
|
|
|
// Should be impossible, but better to return a
|
|
|
|
|
// truncated profile than to crash the entire process.
|
|
|
|
|
return
|
|
|
|
|
}
|
2021-11-09 19:50:47 -05:00
|
|
|
// saveg calls gentraceback, which may call cgo traceback functions.
|
|
|
|
|
// The world is stopped, so it cannot use cgocall (which will be
|
|
|
|
|
// blocked at exitsyscall). Do it on the system stack so it won't
|
|
|
|
|
// call into the scheduler (see traceback.go:cgoContextPCs).
|
|
|
|
|
systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
|
2020-12-23 15:05:37 -05:00
|
|
|
if labels != nil {
|
|
|
|
|
lbl[0] = gp1.labels
|
|
|
|
|
lbl = lbl[1:]
|
|
|
|
|
}
|
|
|
|
|
r = r[1:]
|
|
|
|
|
})
|
2014-09-01 00:06:26 -04:00
|
|
|
}
|
|
|
|
|
|
2022-02-14 12:16:22 -08:00
|
|
|
if raceenabled {
|
|
|
|
|
raceacquire(unsafe.Pointer(&labelSync))
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-26 22:58:59 -05:00
|
|
|
startTheWorld()
|
2014-09-01 18:51:12 -04:00
|
|
|
return n, ok
|
|
|
|
|
}
|
2014-09-01 00:06:26 -04:00
|
|
|
|
2019-08-04 15:14:48 -04:00
|
|
|
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
|
|
|
|
|
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
|
|
|
|
|
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
|
|
|
|
|
//
|
|
|
|
|
// Most clients should use the runtime/pprof package instead
|
|
|
|
|
// of calling GoroutineProfile directly.
|
|
|
|
|
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
|
|
|
|
|
|
|
|
|
|
return goroutineProfileWithLabels(p, nil)
|
|
|
|
|
}
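A usage sketch (not part of the runtime): because goroutines may be created between the sizing call and the copying call, callers typically loop until ok is true, padding the slice on each attempt.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Grow-and-retry: the goroutine count can change between calls.
	records := make([]runtime.StackRecord, runtime.NumGoroutine()+8)
	n, ok := runtime.GoroutineProfile(records)
	for !ok {
		records = make([]runtime.StackRecord, n+8)
		n, ok = runtime.GoroutineProfile(records)
	}
	fmt.Println("goroutines profiled:", n)

	// Symbolize the first record's stack.
	frames := runtime.CallersFrames(records[0].Stack())
	for {
		f, more := frames.Next()
		fmt.Println(f.Function)
		if !more {
			break
		}
	}
}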
|
|
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
|
2023-02-14 12:25:11 -05:00
|
|
|
var u unwinder
|
|
|
|
|
u.initAt(pc, sp, 0, gp, unwindSilentErrors)
|
|
|
|
|
n := tracebackPCs(&u, 0, r.Stack0[:])
|
runtime: convert traceback*.c to Go
The two converted files were nearly identical.
Instead of continuing that duplication, I merged them
into a single traceback.go.
Tested on arm, amd64, amd64p32, and 386.
LGTM=r
R=golang-codereviews, remyoudompheng, dave, r
CC=dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/134200044
2014-09-02 15:12:53 -04:00
|
|
|
if n < len(r.Stack0) {
|
2014-09-01 18:51:12 -04:00
|
|
|
r.Stack0[n] = 0
|
|
|
|
|
}
|
2014-09-01 00:06:26 -04:00
|
|
|
}
|
|
|
|
|
|
2014-08-26 08:34:46 +02:00
|
|
|
// Stack formats a stack trace of the calling goroutine into buf
|
|
|
|
|
// and returns the number of bytes written to buf.
|
|
|
|
|
// If all is true, Stack formats stack traces of all other goroutines
|
|
|
|
|
// into buf after the trace for the current goroutine.
|
|
|
|
|
func Stack(buf []byte, all bool) int {
|
|
|
|
|
if all {
|
2023-05-11 21:09:10 +00:00
|
|
|
stopTheWorld(stwAllGoroutinesStack)
|
2014-08-26 08:34:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
n := 0
|
|
|
|
|
if len(buf) > 0 {
|
2014-12-15 14:39:28 -08:00
|
|
|
gp := getg()
|
2018-04-26 14:06:08 -04:00
|
|
|
sp := getcallersp()
|
2017-09-22 15:16:26 -04:00
|
|
|
pc := getcallerpc()
|
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalarg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most system an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 14:54:31 -05:00
|
|
|
systemstack(func() {
|
runtime: avoid gentraceback of self on user goroutine stack
Gentraceback may grow the stack.
One of the gentraceback wrappers may grow the stack.
One of the gentraceback callback calls may grow the stack.
Various stack pointers are stored in various stack locations
as type uintptr during the execution of these calls.
If the stack does grow, these stack pointers will not be
updated and will start trying to decode stack memory that
is no longer valid.
It may be possible to change the type of the stack pointer
variables to be unsafe.Pointer, but that's pretty subtle and
may still have problems, even if we catch every last one.
An easier, more obviously correct fix is to require that
gentraceback of the currently running goroutine must run
on the g0 stack, not on the goroutine's own stack.
Not doing this causes faults when you set
StackFromSystem = 1
StackFaultOnFree = 1
The new check in gentraceback will catch future lapses.
The more general problem is calling getcallersp but then
calling a function that might relocate the stack, which would
invalidate the result of getcallersp. Add note to stubs.go
declaration of getcallersp explaining the problem, and
check all existing calls to getcallersp. Most needed fixes.
This affects Callers, Stack, and nearly all the runtime
profiling routines. It does not affect stack copying directly
nor garbage collection.
LGTM=khr
R=khr, bradfitz
CC=golang-codereviews, r
https://golang.org/cl/167060043
2014-11-05 23:01:48 -05:00
|
|
|
g0 := getg()
|
2016-01-06 21:16:01 -05:00
|
|
|
// Force traceback=1 to override GOTRACEBACK setting,
|
|
|
|
|
// so that Stack's results are consistent.
|
|
|
|
|
// GOTRACEBACK is only about crash dumps.
|
|
|
|
|
g0.m.traceback = 1
|
2014-11-05 23:01:48 -05:00
|
|
|
g0.writebuf = buf[0:0:len(buf)]
|
|
|
|
|
goroutineheader(gp)
|
|
|
|
|
traceback(pc, sp, 0, gp)
|
|
|
|
|
if all {
|
|
|
|
|
tracebackothers(gp)
|
|
|
|
|
}
|
2016-01-06 21:16:01 -05:00
|
|
|
g0.m.traceback = 0
|
runtime: avoid gentraceback of self on user goroutine stack
Gentraceback may grow the stack.
One of the gentraceback wrappers may grow the stack.
One of the gentraceback callback calls may grow the stack.
Various stack pointers are stored in various stack locations
as type uintptr during the execution of these calls.
If the stack does grow, these stack pointers will not be
updated and will start trying to decode stack memory that
is no longer valid.
It may be possible to change the type of the stack pointer
variables to be unsafe.Pointer, but that's pretty subtle and
may still have problems, even if we catch every last one.
An easier, more obviously correct fix is to require that
gentraceback of the currently running goroutine must run
on the g0 stack, not on the goroutine's own stack.
Not doing this causes faults when you set
StackFromSystem = 1
StackFaultOnFree = 1
The new check in gentraceback will catch future lapses.
The more general problem is calling getcallersp but then
calling a function that might relocate the stack, which would
invalidate the result of getcallersp. Add note to stubs.go
declaration of getcallersp explaining the problem, and
check all existing calls to getcallersp. Most needed fixes.
This affects Callers, Stack, and nearly all the runtime
profiling routines. It does not affect stack copying directly
nor garbage collection.
LGTM=khr
R=khr, bradfitz
CC=golang-codereviews, r
https://golang.org/cl/167060043
2014-11-05 23:01:48 -05:00
|
|
|
n = len(g0.writebuf)
|
|
|
|
|
g0.writebuf = nil
|
|
|
|
|
})
|
2014-08-26 08:34:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if all {
|
2015-05-15 16:00:50 -04:00
|
|
|
startTheWorld()
|
2014-08-26 08:34:46 +02:00
|
|
|
}
|
|
|
|
|
return n
|
|
|
|
|
}
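A usage sketch: Stack silently truncates when buf is too small, so callers that want a complete dump usually double the buffer until the written length is less than the buffer length.

package main

import (
	"fmt"
	"runtime"
)

// fullStack keeps doubling the buffer until runtime.Stack no longer fills it,
// which means the trace was not truncated.
func fullStack(all bool) []byte {
	buf := make([]byte, 4096)
	for {
		n := runtime.Stack(buf, all)
		if n < len(buf) {
			return buf[:n]
		}
		buf = make([]byte, 2*len(buf))
	}
}

func main() {
	fmt.Printf("%s", fullStack(true))
}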
|
|
|
|
|
|
2014-09-01 00:06:26 -04:00
|
|
|
// Tracing of alloc/free/gc.
|
|
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
var tracelock mutex
|
2014-09-01 00:06:26 -04:00
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
|
2014-09-01 00:06:26 -04:00
|
|
|
lock(&tracelock)
|
2014-09-01 18:51:12 -04:00
|
|
|
gp := getg()
|
|
|
|
|
gp.m.traceback = 2
|
|
|
|
|
if typ == nil {
|
|
|
|
|
print("tracealloc(", p, ", ", hex(size), ")\n")
|
|
|
|
|
} else {
|
2023-04-24 15:45:33 -04:00
|
|
|
print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
|
2014-09-01 18:51:12 -04:00
|
|
|
}
|
|
|
|
|
if gp.m.curg == nil || gp == gp.m.curg {
|
|
|
|
|
goroutineheader(gp)
|
2017-09-22 15:16:26 -04:00
|
|
|
pc := getcallerpc()
|
2018-04-26 14:06:08 -04:00
|
|
|
sp := getcallersp()
|
2014-11-12 14:54:31 -05:00
|
|
|
systemstack(func() {
|
2014-11-05 23:01:48 -05:00
|
|
|
traceback(pc, sp, 0, gp)
|
|
|
|
|
})
|
2014-09-01 00:06:26 -04:00
|
|
|
} else {
|
2014-09-01 18:51:12 -04:00
|
|
|
goroutineheader(gp.m.curg)
|
|
|
|
|
traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
|
2014-08-21 08:07:42 +02:00
|
|
|
}
|
2014-09-01 18:51:12 -04:00
|
|
|
print("\n")
|
|
|
|
|
gp.m.traceback = 0
|
2014-09-01 00:06:26 -04:00
|
|
|
unlock(&tracelock)
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
func tracefree(p unsafe.Pointer, size uintptr) {
|
2014-09-01 00:06:26 -04:00
|
|
|
lock(&tracelock)
|
2014-09-01 18:51:12 -04:00
|
|
|
gp := getg()
|
|
|
|
|
gp.m.traceback = 2
|
|
|
|
|
print("tracefree(", p, ", ", hex(size), ")\n")
|
|
|
|
|
goroutineheader(gp)
|
2017-09-22 15:16:26 -04:00
|
|
|
pc := getcallerpc()
|
2018-04-26 14:06:08 -04:00
|
|
|
sp := getcallersp()
|
2014-11-12 14:54:31 -05:00
|
|
|
systemstack(func() {
|
2014-11-05 23:01:48 -05:00
|
|
|
traceback(pc, sp, 0, gp)
|
|
|
|
|
})
|
2014-09-01 18:51:12 -04:00
|
|
|
print("\n")
|
|
|
|
|
gp.m.traceback = 0
|
2014-09-01 00:06:26 -04:00
|
|
|
unlock(&tracelock)
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-01 18:51:12 -04:00
|
|
|
func tracegc() {
|
2014-09-01 00:06:26 -04:00
|
|
|
lock(&tracelock)
|
2014-09-01 18:51:12 -04:00
|
|
|
gp := getg()
|
|
|
|
|
gp.m.traceback = 2
|
|
|
|
|
print("tracegc()\n")
|
|
|
|
|
// running on m->g0 stack; show all non-g0 goroutines
|
|
|
|
|
tracebackothers(gp)
|
|
|
|
|
print("end tracegc\n")
|
|
|
|
|
print("\n")
|
|
|
|
|
gp.m.traceback = 0
|
2014-09-01 00:06:26 -04:00
|
|
|
unlock(&tracelock)
|
2014-08-21 08:07:42 +02:00
|
|
|
}
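The trace helpers above are internal; in this vintage of the runtime they fire when the allocfreetrace debug setting is enabled (that knob is an assumption about the surrounding release and was removed in later ones). A trivial way to observe their output:

package main

// Run with the allocation/free tracer enabled (assumed GODEBUG knob for this
// runtime vintage):
//
//	GODEBUG=allocfreetrace=1 ./prog 2>trace.txt
//
// Each allocation and free then prints a tracealloc/tracefree line and a stack
// trace on standard error, produced by the functions above.

func main() {
	_ = make([]byte, 1<<20) // any heap allocation shows up in the trace
}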
|