mirror of https://github.com/golang/go.git
runtime: emit STW events for all pauses, not just those for the GC
Currently STW events are only emitted for GC STWs. There's little reason why the trace can't contain events for every STW: they're rare, so they don't take up much space in the trace, yet being able to see when the world was stopped is often critical to debugging certain latency issues, especially when they stem from user-level APIs.

This change adds new "kinds" to the EvGCSTWStart event, renames the GCSTW events to just "STW," and lets the parser deal with unknown STW kinds for future backwards compatibility. But this change must break trace compatibility, so it bumps the trace version to Go 1.21.

This change also includes a small cleanup in the trace command, which previously checked for STW events when deciding whether user tasks overlapped with a GC. Looking at the source, I don't see a way for STW events to ever enter the stream that that code looks at, so that condition has been deleted.

Change-Id: I9a5dc144092c53e92eb6950e9a5504a790ac00cf
Reviewed-on: https://go-review.googlesource.com/c/go/+/494495
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
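A minimal sketch (not the runtime's actual code) of the direction the diff below implies: stopTheWorld takes an enumerated reason instead of a free-form string, so the tracer can attach a kind to each STW event. Apart from the stwForTest* names visible in the hunks, every identifier here is an assumption.

package main

import "fmt"

// stwReason enumerates why the world was stopped. Sketch only; the real
// runtime defines its own set of reasons (GC phases, test hooks, user-level
// APIs, and so on).
type stwReason uint8

const (
	stwUnknown stwReason = iota
	stwGCMarkTermination
	stwGCSweepTermination
	stwForTestCountPagesInUse // test-only reasons, as seen in the diff
	stwForTestReadMetricsSlow
	stwForTestReadMemStatsSlow
	stwForTestPageCachePagesLeaked
)

// String gives the human-readable label a tracer or debug log could use.
func (r stwReason) String() string {
	names := [...]string{
		stwUnknown:                     "unknown",
		stwGCMarkTermination:           "GC mark termination",
		stwGCSweepTermination:          "GC sweep termination",
		stwForTestCountPagesInUse:      "CountPagesInUse (test)",
		stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
		stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
		stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	}
	if int(r) < len(names) {
		return names[r]
	}
	return "unknown"
}

// stopTheWorld stands in for runtime.stopTheWorld: instead of a descriptive
// string it now takes a reason, which the tracer can emit as the STW kind.
func stopTheWorld(reason stwReason) {
	fmt.Println("STW:", reason)
}

func main() {
	stopTheWorld(stwForTestReadMemStatsSlow)
}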
parent 944911af56
commit b1aadd034c
19 changed files with 177 additions and 69 deletions
@@ -276,7 +276,7 @@ var ReadUnaligned32 = readUnaligned32
 var ReadUnaligned64 = readUnaligned64
 
 func CountPagesInUse() (pagesInUse, counted uintptr) {
-	stopTheWorld("CountPagesInUse")
+	stopTheWorld(stwForTestCountPagesInUse)
 
 	pagesInUse = uintptr(mheap_.pagesInUse.Load())
@@ -319,7 +319,7 @@ func (p *ProfBuf) Close() {
 }
 
 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
-	stopTheWorld("ReadMetricsSlow")
+	stopTheWorld(stwForTestReadMetricsSlow)
 
 	// Initialize the metrics beforehand because this could
 	// allocate and skew the stats.
@@ -347,7 +347,7 @@ func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int)
 // ReadMemStatsSlow returns both the runtime-computed MemStats and
 // MemStats accumulated by scanning the heap.
 func ReadMemStatsSlow() (base, slow MemStats) {
-	stopTheWorld("ReadMemStatsSlow")
+	stopTheWorld(stwForTestReadMemStatsSlow)
 
 	// Run on the system stack to avoid stack growth allocation.
 	systemstack(func() {
@@ -1193,7 +1193,7 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 }
 
 func PageCachePagesLeaked() (leaked uintptr) {
-	stopTheWorld("PageCachePagesLeaked")
+	stopTheWorld(stwForTestPageCachePagesLeaked)
 
 	// Walk over destroyed Ps and look for unflushed caches.
 	deadp := allp[len(allp):cap(allp)]
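The forward-compatibility point in the commit message, that the parser should "deal with unknown STW kinds," amounts to mapping unrecognized kind values to a generic label instead of rejecting the trace. A minimal sketch under that assumption; the kind values and names here are made up and are not the internal/trace API.

package main

import "fmt"

// Hypothetical STW kind values as a tracer might encode them in an event
// argument; only the distinction between known and unknown kinds matters
// for this sketch.
const (
	stwKindGCMarkTermination uint64 = iota
	stwKindGCSweepTermination
)

// stwKindName labels a kind, falling back to "unknown" rather than failing,
// so a trace produced by a newer toolchain still parses.
func stwKindName(kind uint64) string {
	switch kind {
	case stwKindGCMarkTermination:
		return "GC mark termination"
	case stwKindGCSweepTermination:
		return "GC sweep termination"
	default:
		return "unknown"
	}
}

func main() {
	for _, k := range []uint64{0, 1, 42} {
		fmt.Printf("STW kind %d -> %q\n", k, stwKindName(k))
	}
}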