go/src/runtime/debuglog.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
//
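// For example, a hypothetical call site could look like this (gen,
// released, and ok are placeholder variables):
//
//	dlog().s("scav").i(gen).u64(released).b(ok).end()
//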
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.
//
// Implementation notes
//
// There are two implementations of the dlog interface: dloggerImpl and
// dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
// to one or the other depending on the debuglog build tag. However, both types
// always exist and are always built. This helps ensure we compile as much of
// the implementation as possible in the default build configuration, while also
// enabling us to achieve good test coverage of the real debuglog implementation
// even when the debuglog build tag is not set.
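//
// Concretely, the build-tag-gated definitions look roughly like this
// (a sketch; the real ones live in debuglog_on.go and debuglog_off.go):
//
//	type dlogger = *dloggerImpl // with the debuglog tag
//	func dlog1() *dloggerImpl { return dlogImpl() }
//
//	type dlogger = dloggerFake // without the debuglog tag
//	func dlog1() dloggerFake { return dlogFake() }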

package runtime

import (
"internal/abi"
"internal/byteorder"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10
// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8
// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() dlogger {
// dlog1 is defined as either dlogImpl or dlogFake, depending on the
// debuglog build tag.
return dlog1()
}
//go:nosplit
//go:nowritebarrierrec
func dlogFake() dloggerFake {
return dloggerFake{}
}
//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
// Get the time.
tick, nano := uint64(cputicks()), uint64(nanotime())
// Try to get a cached logger.
l := getCachedDlogger()
// If we couldn't get a cached logger, try to get one from the
// global pool.
if l == nil {
allp := (*uintptr)(unsafe.Pointer(&allDloggers))
all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
for l1 := all; l1 != nil; l1 = l1.allLink {
if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
l = l1
break
}
}
}
// If that failed, allocate a new logger.
if l == nil {
// Use sysAllocOS instead of sysAlloc because we want to interfere
// with the runtime as little as possible, and sysAlloc updates accounting.
l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
if l == nil {
throw("failed to allocate debug log")
}
l.w.r.data = &l.w.data
l.owned.Store(1)
// Prepend to allDloggers list.
headp := (*uintptr)(unsafe.Pointer(&allDloggers))
for {
head := atomic.Loaduintptr(headp)
l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
break
}
}
}
// If the time delta is getting too high, write a new sync
// packet. We set the limit so we don't write more than 6
// bytes of delta in the record header.
const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
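// (A uvarint carries 7 payload bits per byte, so 1<<(3*7)-1 is the
// largest delta that fits in 3 bytes; the tick and nano deltas together
// thus take at most 6 bytes. 2^21 nanoseconds is ~2.1ms, hence "~2ms".)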
if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
l.w.writeSync(tick, nano)
}
// Reserve space for framing header.
l.w.ensure(debugLogHeaderSize)
l.w.write += debugLogHeaderSize
// Write record header.
l.w.uvarint(tick - l.w.tick)
l.w.uvarint(nano - l.w.nano)
gp := getg()
if gp != nil && gp.m != nil && gp.m.p != 0 {
l.w.varint(int64(gp.m.p.ptr().id))
} else {
l.w.varint(-1)
}
return l
}
// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
_ sys.NotInHeap
w debugLogWriter
// allLink is the next dlogger in the allDloggers list.
allLink *dloggerImpl
// owned indicates that this dlogger is owned by an M. This is
// accessed atomically.
owned atomic.Uint32
}
// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dloggerImpl
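
// Note that because loggers are only ever prepended and never removed,
// the head value never recurs with a different list behind it; this is
// why the CAS-based prepend in dlogImpl needs no ABA protection.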
// A dloggerFake is a no-op implementation of dlogger.
type dloggerFake struct{}
//go:nosplit
func (l dloggerFake) end() {}
//go:nosplit
func (l *dloggerImpl) end() {
// Fill in framing header.
size := l.w.write - l.w.r.end
if !l.w.writeFrameAt(l.w.r.end, size) {
throw("record too large")
}
// Commit the record.
l.w.r.end = l.w.write
// Attempt to return this logger to the cache.
if putCachedDlogger(l) {
return
}
// Return the logger to the global pool.
l.owned.Store(0)
}
const (
debugLogUnknown = 1 + iota
debugLogBoolTrue
debugLogBoolFalse
debugLogInt
debugLogUint
debugLogHex
debugLogPtr
debugLogString
debugLogConstString
debugLogHexdump
debugLogOverflow
debugLogPC
debugLogTraceback
)
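
// Taken together with the writer calls in dlogImpl and end, a record is
// framed roughly as: a debugLogHeaderSize framing header holding the
// record's size, a uvarint tick delta, a uvarint nano delta, a varint P
// ID (-1 if unknown), then a sequence of values, each a one-byte tag
// from the list above followed by that tag's encoding.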
//go:nosplit
func (l dloggerFake) b(x bool) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) b(x bool) *dloggerImpl {
if x {
l.w.byte(debugLogBoolTrue)
} else {
l.w.byte(debugLogBoolFalse)
}
return l
}
//go:nosplit
func (l dloggerFake) i(x int) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl {
return l.i64(int64(x))
}
//go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl {
return l.i64(int64(x))
}
//go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl {
return l.i64(int64(x))
}
//go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl {
return l.i64(int64(x))
}
//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
l.w.byte(debugLogInt)
l.w.varint(x)
return l
}
//go:nosplit
func (l dloggerFake) u(x uint) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl {
return l.u64(uint64(x))
}
//go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
return l.u64(uint64(x))
}
//go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
return l.u64(uint64(x))
}
//go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
return l.u64(uint64(x))
}
//go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
return l.u64(uint64(x))
}
//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
l.w.byte(debugLogUint)
l.w.uvarint(x)
return l
}
//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
l.w.byte(debugLogHex)
l.w.uvarint(x)
return l
}
//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
l.w.byte(debugLogPtr)
if x == nil {
l.w.uvarint(0)
} else {
v := efaceOf(&x)
switch v._type.Kind() {
case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
l.w.uvarint(uint64(uintptr(v.data)))
default:
throw("not a pointer type")
}
}
return l
}
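
// Only pointer-shaped kinds are accepted above because for those the
// interface's data word is the value itself; for any other kind the
// data word points at a copy, whose address would be meaningless to log.
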
//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
strData := unsafe.StringData(x)
datap := &firstmoduledata
if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
// String constants are in the rodata section, which
// isn't recorded in moduledata. But it has to be
// somewhere between etext and end.
l.w.byte(debugLogConstString)
l.w.uvarint(uint64(len(x)))
l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
} else {
l.w.byte(debugLogString)
// We can't use unsafe.Slice as it may panic, which isn't safe
// in this (potentially) nowritebarrier context.
var b []byte
bb := (*slice)(unsafe.Pointer(&b))
bb.array = unsafe.Pointer(strData)
bb.len, bb.cap = len(x), len(x)
if len(b) > debugLogStringLimit {
b = b[:debugLogStringLimit]
}
l.w.uvarint(uint64(len(b)))
l.w.bytes(b)
if len(b) != len(x) {
l.w.byte(debugLogOverflow)
l.w.uvarint(uint64(len(x) - len(b)))
}
}
return l
}
//go:nosplit
func (l dloggerFake) hexdump(p unsafe.Pointer, bytes uintptr) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) hexdump(p unsafe.Pointer, bytes uintptr) *dloggerImpl {
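// As in s above, we can't use unsafe.Slice as it may panic, which
// isn't safe in this (potentially) nowritebarrier context.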
var b []byte
bb := (*slice)(unsafe.Pointer(&b))
bb.array = unsafe.Pointer(p)
bb.len, bb.cap = int(bytes), int(bytes)
if len(b) > debugLogStringLimit {
b = b[:debugLogStringLimit]
}
l.w.byte(debugLogHexdump)
l.w.uvarint(uint64(uintptr(p)))
l.w.uvarint(uint64(len(b)))
l.w.bytes(b)
if uintptr(len(b)) != bytes {
l.w.byte(debugLogOverflow)
l.w.uvarint(uint64(bytes) - uint64(len(b)))
}
return l
}
//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
l.w.byte(debugLogPC)
l.w.uvarint(uint64(x))
return l
}
//go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
l.w.byte(debugLogTraceback)
l.w.uvarint(uint64(len(x)))
for _, pc := range x {
l.w.uvarint(uint64(pc))
}
return l
}
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
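// For example, a 13-byte record (the size counts the framing
// header itself, as skip below assumes) is framed as the bytes
// 0x0D 0x00 followed by 11 bytes of fields.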
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
_ sys.NotInHeap
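// write is the logical write position. Like all positions in this
// file, it increases monotonically and is reduced modulo
// len(data.b) only when indexing into the buffer.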
write uint64
data debugLogBuf
// tick and nano are the time bases from the most recently
// written sync record.
tick, nano uint64
// r is a reader that consumes records as they get overwritten
// by the writer. It also acts as the initial reader state
// when printing the log.
r debugLogReader
// buf is a scratch buffer for encoding. This is here to
// reduce stack usage.
buf [10]byte
}
type debugLogBuf struct {
_ sys.NotInHeap
b [debugLogBytes]byte
}
const (
// debugLogHeaderSize is the number of bytes in the framing
// header of every dlog record.
debugLogHeaderSize = 2
// debugLogSyncSize is the number of bytes in a sync record.
debugLogSyncSize = debugLogHeaderSize + 2*8
)
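// ensure makes room to write n bytes, consuming old records from
// the internal reader as needed.
//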
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
// Consume record at begin.
if l.r.skip() == ^uint64(0) {
// Wrapped around within a record.
//
// TODO(austin): It would be better to just
// eat the whole buffer at this point, but we
// have to communicate that to the reader
// somehow.
throw("record wrapped around")
}
}
}
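// writeFrameAt writes the little-endian 16-bit framing header size
// at position pos and reports whether size fit in 16 bits.
//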
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
return size <= 0xFFFF
}
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
l.tick, l.nano = tick, nano
l.ensure(debugLogHeaderSize)
l.writeFrameAt(l.write, 0)
l.write += debugLogHeaderSize
l.writeUint64LE(tick)
l.writeUint64LE(nano)
l.r.end = l.write
}
//go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64) {
var b [8]byte
byteorder.LEPutUint64(b[:], x)
l.bytes(b[:])
}
//go:nosplit
func (l *debugLogWriter) byte(x byte) {
l.ensure(1)
pos := l.write
l.write++
l.data.b[pos%uint64(len(l.data.b))] = x
}
//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
l.ensure(uint64(len(x)))
pos := l.write
l.write += uint64(len(x))
for len(x) > 0 {
n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
pos += uint64(n)
x = x[n:]
}
}
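// varint writes a zig-zag-encoded signed value: 0 -> 0, -1 -> 1,
// 1 -> 2, -2 -> 3, matching encoding/binary's varint scheme.
//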
//go:nosplit
func (l *debugLogWriter) varint(x int64) {
var u uint64
if x < 0 {
u = (^uint64(x) << 1) | 1 // complement x, bit 0 is 1
} else {
u = (uint64(x) << 1) // do not complement x, bit 0 is 0
}
l.uvarint(u)
}
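// uvarint writes u in base-128 varint encoding: seven bits per
// byte, least significant first, with the high bit marking
// continuation. For example, 300 encodes as the two bytes 0xAC 0x02.
//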
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
i := 0
for u >= 0x80 {
l.buf[i] = byte(u) | 0x80
u >>= 7
i++
}
l.buf[i] = byte(u)
i++
l.bytes(l.buf[:i])
}
type debugLogReader struct {
data *debugLogBuf
// begin and end are the positions in the log of the beginning
// and end of the log data, modulo len(data).
begin, end uint64
// tick and nano are the current time base at begin.
tick, nano uint64
}
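// skip consumes the record at begin, updating the time base if it
// is a sync record, and returns the number of bytes consumed, or
// ^uint64(0) if a complete record isn't available between begin
// and end.
//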
//go:nosplit
func (r *debugLogReader) skip() uint64 {
// Read size at begin.
if r.begin+debugLogHeaderSize > r.end {
return ^uint64(0)
}
size := uint64(r.readUint16LEAt(r.begin))
if size == 0 {
// Sync packet.
r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
size = debugLogSyncSize
}
if r.begin+size > r.end {
return ^uint64(0)
}
r.begin += size
return size
}
//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}
//go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
var b [8]byte
for i := range b {
b[i] = r.data.b[pos%uint64(len(r.data.b))]
pos++
}
return byteorder.LEUint64(b[:])
}
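// peek returns the absolute tick of the next data record without
// consuming it, consuming any sync records along the way. It
// returns ^uint64(0) if no complete record is available.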
func (r *debugLogReader) peek() (tick uint64) {
// Consume any sync records.
size := uint64(0)
for size == 0 {
if r.begin+debugLogHeaderSize > r.end {
return ^uint64(0)
}
size = uint64(r.readUint16LEAt(r.begin))
if size != 0 {
break
}
if r.begin+debugLogSyncSize > r.end {
return ^uint64(0)
}
// Sync packet.
r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
r.begin += debugLogSyncSize
}
// Peek tick delta.
if r.begin+size > r.end {
return ^uint64(0)
}
pos := r.begin + debugLogHeaderSize
var u uint64
for i := uint(0); ; i += 7 {
b := r.data.b[pos%uint64(len(r.data.b))]
pos++
u |= uint64(b&^0x80) << i
if b&0x80 == 0 {
break
}
}
if pos > r.begin+size {
return ^uint64(0)
}
return r.tick + u
}
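// header decodes the framing header and the tick, nano, and p
// fields of the record at begin, returning the record's end
// position and leaving begin at the first value field.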
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
// Read size. We've already skipped sync packets and checked
// bounds in peek.
size := uint64(r.readUint16LEAt(r.begin))
end = r.begin + size
r.begin += debugLogHeaderSize
// Read tick, nano, and p.
tick = r.uvarint() + r.tick
nano = r.uvarint() + r.nano
p = int(r.varint())
return
}
func (r *debugLogReader) uvarint() uint64 {
var u uint64
for i := uint(0); ; i += 7 {
b := r.data.b[r.begin%uint64(len(r.data.b))]
r.begin++
u |= uint64(b&^0x80) << i
if b&0x80 == 0 {
break
}
}
return u
}
func (r *debugLogReader) varint() int64 {
u := r.uvarint()
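// Undo the zig-zag mapping from debugLogWriter.varint: even u
// decodes to int64(u >> 1), odd u to ^int64(u >> 1), e.g.
// 2 -> 1 and 3 -> -2.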
var v int64
if u&1 == 0 {
v = int64(u >> 1)
} else {
v = ^int64(u >> 1)
}
return v
}
func (r *debugLogReader) printVal() bool {
typ := r.data.b[r.begin%uint64(len(r.data.b))]
r.begin++
switch typ {
default:
print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
return false
case debugLogUnknown:
print("<unknown kind>")
case debugLogBoolTrue:
print(true)
case debugLogBoolFalse:
print(false)
case debugLogInt:
print(r.varint())
case debugLogUint:
print(r.uvarint())
case debugLogHex, debugLogPtr:
print(hex(r.uvarint()))
case debugLogString:
sl := r.uvarint()
if r.begin+sl > r.end {
r.begin = r.end
print("<string length corrupted>")
break
}
for sl > 0 {
b := r.data.b[r.begin%uint64(len(r.data.b)):]
if uint64(len(b)) > sl {
b = b[:sl]
}
r.begin += uint64(len(b))
sl -= uint64(len(b))
gwrite(b)
}
case debugLogConstString:
strLen, ptr := int(r.uvarint()), uintptr(r.uvarint())
ptr += firstmoduledata.etext
// We can't use unsafe.String as it may panic, which isn't safe
// in this (potentially) nowritebarrier context.
str := stringStruct{
str: unsafe.Pointer(ptr),
len: strLen,
}
s := *(*string)(unsafe.Pointer(&str))
print(s)
case debugLogOverflow:
print("..(", r.uvarint(), " more bytes)..")
case debugLogHexdump:
p := uintptr(r.uvarint())
bl := r.uvarint()
if r.begin+bl > r.end {
r.begin = r.end
print("<hexdump length corrupted>")
break
}
println() // Start on a new line
hd := hexdumper{addr: p}
for bl > 0 {
b := r.data.b[r.begin%uint64(len(r.data.b)):]
if uint64(len(b)) > bl {
b = b[:bl]
}
r.begin += uint64(len(b))
bl -= uint64(len(b))
hd.write(b)
}
hd.close()
case debugLogPC:
printDebugLogPC(uintptr(r.uvarint()), false)
case debugLogTraceback:
n := int(r.uvarint())
for i := 0; i < n; i++ {
print("\n\t")
// gentraceback PCs are always return PCs.
// Convert them to call PCs.
//
// TODO(austin): Expand inlined frames.
printDebugLogPC(uintptr(r.uvarint()), true)
}
}
return true
}
// printDebugLog prints the debug log.
func printDebugLog() {
if dlogEnabled {
printDebugLogImpl()
}
}
func printDebugLogImpl() {
// This function should not panic or throw, since it is used in
// the fatal panic path, where panicking or throwing may deadlock.
printlock()
// Get the list of all debug logs.
allp := (*uintptr)(unsafe.Pointer(&allDloggers))
all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
// Count the logs.
n := 0
for l := all; l != nil; l = l.allLink {
n++
}
if n == 0 {
printunlock()
return
}
// Prepare read state for all logs.
type readState struct {
debugLogReader
first bool // set until this log's banner line has been printed
lost uint64 // bytes overwritten before this reader's begin
nextTick uint64 // tick of the next record, from peek
}
// Use sysAllocOS instead of sysAlloc because we want to interfere
// with the runtime as little as possible, and sysAlloc updates accounting.
state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
if state1 == nil {
println("failed to allocate read state for", n, "logs")
printunlock()
return
}
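// View the raw allocation as a slice of n readStates. The
// [1 << 20] array type is only a conversion bound; only the first
// n elements are used.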
state := (*[1 << 20]readState)(state1)[:n]
{
l := all
for i := range state {
s := &state[i]
s.debugLogReader = l.w.r
s.first = true
s.lost = l.w.r.begin
s.nextTick = s.peek()
l = l.allLink
}
}
// Print records.
for {
// Find the next record.
var best struct {
tick uint64
i int
}
best.tick = ^uint64(0)
for i := range state {
if state[i].nextTick < best.tick {
best.tick = state[i].nextTick
best.i = i
}
}
if best.tick == ^uint64(0) {
break
}
// Print record.
s := &state[best.i]
if s.first {
print(">> begin log ", best.i)
if s.lost != 0 {
print("; lost first ", s.lost>>10, "KB")
}
print(" <<\n")
s.first = false
}
end, _, nano, p := s.header()
oldEnd := s.end
s.end = end
print("[")
var tmpbuf [21]byte
pnano := int64(nano) - runtimeInitTime
if pnano < 0 {
// Logged before runtimeInitTime was set.
pnano = 0
}
pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
print(" P ", p, "] ")
for i := 0; s.begin < s.end; i++ {
if i > 0 {
print(" ")
}
if !s.printVal() {
// Abort this P log.
print("<aborting P log>")
end = oldEnd
break
}
}
println()
// Move on to the next record.
s.begin = end
s.end = oldEnd
s.nextTick = s.peek()
}
printunlock()
}
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
fn := findfunc(pc)
if returnPC && (!fn.valid() || pc > fn.entry()) {
// TODO(austin): Don't back up if the previous frame
// was a sigpanic.
pc--
}
print(hex(pc))
if !fn.valid() {
print(" [unknown PC]")
} else {
name := funcname(fn)
file, line := funcline(fn, pc)
print(" [", name, "+", hex(pc-fn.entry()),
" ", file, ":", line, "]")
}
}