// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
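//
// For example, a minimal message carrying a string and an int could be
// written as:
//
//	dlog().s("hello").i(42).end()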
//
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.
//
// Implementation notes
//
// There are two implementations of the dlog interface: dloggerImpl and
// dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
// to one or the other depending on the debuglog build tag. However, both types
// always exist and are always built. This helps ensure we compile as much of
// the implementation as possible in the default build configuration, while also
// enabling us to achieve good test coverage of the real debuglog implementation
// even when the debuglog build tag is not set.

package runtime

import (
	"internal/abi"
	"internal/byteorder"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8

// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
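//
// For example (a sketch; base and pcs stand in for whatever arguments
// the call site actually has):
//
//	if dlogEnabled {
//		dlog().s("span state").hex(uint64(base)).traceback(pcs).end()
//	}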
//
//go:nosplit
//go:nowritebarrierrec
func dlog() dlogger {
	// dlog1 is defined to either dlogImpl or dlogFake.
	return dlog1()
}

//go:nosplit
//go:nowritebarrierrec
func dlogFake() dloggerFake {
	return dloggerFake{}
}

//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header.
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}

// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
	_ sys.NotInHeap
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dloggerImpl

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned atomic.Uint32
}

// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dloggerImpl

// A dloggerFake is a no-op implementation of dlogger.
type dloggerFake struct{}

//go:nosplit
func (l dloggerFake) end() {}

//go:nosplit
func (l *dloggerImpl) end() {
	// Fill in framing header.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	l.owned.Store(0)
}

const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt
	debugLogUint
	debugLogHex
	debugLogPtr
	debugLogString
	debugLogConstString
	debugLogHexdump
	debugLogOverflow

	debugLogPC
	debugLogTraceback
)

//go:nosplit
func (l dloggerFake) b(x bool) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) b(x bool) *dloggerImpl {
	if x {
		l.w.byte(debugLogBoolTrue)
	} else {
		l.w.byte(debugLogBoolFalse)
	}
	return l
}

//go:nosplit
func (l dloggerFake) i(x int) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}

//go:nosplit
func (l dloggerFake) u(x uint) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}

//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
	l.w.byte(debugLogHex)
	l.w.uvarint(x)
	return l
}

//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind() {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}

//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			l.w.byte(debugLogOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
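
// Note that in the debugLogConstString case above, only the string's
// length and its offset from firstmoduledata.etext are recorded;
// printVal adds etext back to recover the string, so constant strings
// cost a few bytes in the log regardless of their length.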

//go:nosplit
func (l dloggerFake) hexdump(p unsafe.Pointer, bytes uintptr) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) hexdump(p unsafe.Pointer, bytes uintptr) *dloggerImpl {
	var b []byte
	bb := (*slice)(unsafe.Pointer(&b))
	bb.array = unsafe.Pointer(p)
	bb.len, bb.cap = int(bytes), int(bytes)
	if len(b) > debugLogStringLimit {
		b = b[:debugLogStringLimit]
	}

	l.w.byte(debugLogHexdump)
	l.w.uvarint(uint64(uintptr(p)))
	l.w.uvarint(uint64(len(b)))
	l.w.bytes(b)

	if uintptr(len(b)) != bytes {
		l.w.byte(debugLogOverflow)
		l.w.uvarint(uint64(bytes) - uint64(len(b)))
	}

	return l
}

//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}

//go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
	l.w.byte(debugLogTraceback)
	l.w.uvarint(uint64(len(x)))
	for _, pc := range x {
		l.w.uvarint(uint64(pc))
	}
	return l
}

// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
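//
// For reference, a sketch of a complete record as laid out by dlogImpl
// and the value methods above (every field is variable width):
//
//	[2-byte size] [uvarint tick delta] [uvarint nano delta] [varint P]
//	[type byte][payload] ... [type byte][payload]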
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_     sys.NotInHeap
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}

type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}

const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
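
// Spelled out, debugLogSyncSize is 2+8+8 = 18 bytes: the zero size
// field followed by the 8-byte tick and 8-byte nano time bases that
// writeSync records and skip/peek consume.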

//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}

//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
	return size <= 0xFFFF
}

//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	l.r.end = l.write
}

//go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64) {
	var b [8]byte
	byteorder.LEPutUint64(b[:], x)
	l.bytes(b[:])
}

//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}

//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}

//go:nosplit
func (l *debugLogWriter) varint(x int64) {
	var u uint64
	if x < 0 {
		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
	} else {
		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
	}
	l.uvarint(u)
}
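
// A worked example of the mapping above: x = 3 encodes as u = 6 and
// x = -3 encodes as u = 5. debugLogReader.varint inverts this: an even
// u decodes to int64(u>>1), an odd u to ^int64(u>>1).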

//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}

type debugLogReader struct {
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}

//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}

//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}

//go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
	var b [8]byte
	for i := range b {
		b[i] = r.data.b[pos%uint64(len(r.data.b))]
		pos++
	}
	return byteorder.LEUint64(b[:])
}

func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		return ^uint64(0)
	}
	return r.tick + u
}

func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}

func (r *debugLogReader) uvarint() uint64 {
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[r.begin%uint64(len(r.data.b))]
		r.begin++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	return u
}

func (r *debugLogReader) varint() int64 {
	u := r.uvarint()
	var v int64
	if u&1 == 0 {
		v = int64(u >> 1)
	} else {
		v = ^int64(u >> 1)
	}
	return v
}

func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogHexdump:
		p := uintptr(r.uvarint())
		bl := r.uvarint()
		if r.begin+bl > r.end {
			r.begin = r.end
			print("<hexdump length corrupted>")
			break
		}
		println() // Start on a new line
		hd := hexdumper{addr: p}
		for bl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > bl {
				b = b[:bl]
			}
			r.begin += uint64(len(b))
			bl -= uint64(len(b))
			hd.write(b)
		}
		hd.close()

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}

// printDebugLog prints the debug log.
func printDebugLog() {
	if dlogEnabled {
		printDebugLogImpl()
	}
}

func printDebugLogImpl() {
	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool
		lost     uint64
		nextTick uint64
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records.
	for {
		// Find the next record.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}

// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}