mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
runtime: add valgrind instrumentation
Add build tag gated Valgrind annotations to the runtime which let it understand how the runtime manages memory. This allows for Go binaries to be run under Valgrind without emitting spurious errors. Instead of adding the Valgrind headers to the tree, and using cgo to call the various Valgrind client request macros, we just add an assembly function which emits the necessary instructions to trigger client requests. In particular we add instrumentation of the memory allocator, using a two-level mempool structure (as described in the Valgrind manual [0]). We also add annotations which allow Valgrind to track which memory we use for stacks, which seems necessary to let it properly function. We describe the memory model to Valgrind as follows: we treat heap arenas as a "pool" created with VALGRIND_CREATE_MEMPOOL_EXT (so that we can use VALGRIND_MEMPOOL_METAPOOL and VALGRIND_MEMPOOL_AUTO_FREE). Within the pool we treat spans as "superblocks", annotated with VALGRIND_MEMPOOL_ALLOC. We then allocate individual objects within spans with VALGRIND_MALLOCLIKE_BLOCK. It should be noted that running binaries under Valgrind can be _quite slow_, and certain operations, such as running the GC, can be _very slow_. It is recommended to run programs with GOGC=off. Additionally, async preemption should be turned off, since it'll cause strange behavior (GODEBUG=asyncpreemptoff=1). Running Valgrind with --leak-check=yes will report some errors caused by some allocations not being marked fully free'd. These likely need more annotations to rectify, but for now it is recommended to run with --leak-check=no. Updates #73602 [0] https://valgrind.org/docs/manual/mc-manual.html#mc-manual.mempools Change-Id: I71b26c47d7084de71ef1e03947ef6b1cc6d38301 Reviewed-on: https://go-review.googlesource.com/c/go/+/674077 LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Reviewed-by: Michael Knyszek <mknyszek@google.com>
This commit is contained in:
parent
2a5ac1a993
commit
40b19b56a9
15 changed files with 364 additions and 3 deletions
|
|
@ -211,6 +211,13 @@ func stackpoolalloc(order uint8) gclinkptr {
|
|||
s.elemsize = fixedStack << order
|
||||
for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
|
||||
x := gclinkptr(s.base() + i)
|
||||
if valgrindenabled {
|
||||
// The address of x.ptr() becomes the base of stacks. We need to
|
||||
// mark it allocated here and in stackfree and stackpoolfree, and free'd in
|
||||
// stackalloc in order to avoid overlapping allocations and
|
||||
// uninitialized memory errors in valgrind.
|
||||
valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
|
||||
}
|
||||
x.ptr().next = s.manualFreeList
|
||||
s.manualFreeList = x
|
||||
}
|
||||
|
|
@ -388,6 +395,12 @@ func stackalloc(n uint32) stack {
|
|||
c.stackcache[order].list = x.ptr().next
|
||||
c.stackcache[order].size -= uintptr(n)
|
||||
}
|
||||
if valgrindenabled {
|
||||
// We're about to allocate the stack region starting at x.ptr().
|
||||
// To prevent valgrind from complaining about overlapping allocations,
|
||||
// we need to mark the (previously allocated) memory as free'd.
|
||||
valgrindFree(unsafe.Pointer(x.ptr()))
|
||||
}
|
||||
v = unsafe.Pointer(x)
|
||||
} else {
|
||||
var s *mspan
|
||||
|
|
@ -432,6 +445,9 @@ func stackalloc(n uint32) stack {
|
|||
if asanenabled {
|
||||
asanunpoison(v, uintptr(n))
|
||||
}
|
||||
if valgrindenabled {
|
||||
valgrindMalloc(v, uintptr(n))
|
||||
}
|
||||
if stackDebug >= 1 {
|
||||
print(" allocated ", v, "\n")
|
||||
}
|
||||
|
|
@ -479,6 +495,9 @@ func stackfree(stk stack) {
|
|||
if asanenabled {
|
||||
asanpoison(v, n)
|
||||
}
|
||||
if valgrindenabled {
|
||||
valgrindFree(v)
|
||||
}
|
||||
if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
|
||||
order := uint8(0)
|
||||
n2 := n
|
||||
|
|
@ -489,6 +508,11 @@ func stackfree(stk stack) {
|
|||
x := gclinkptr(v)
|
||||
if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
|
||||
lock(&stackpool[order].item.mu)
|
||||
if valgrindenabled {
|
||||
// x.ptr() is the head of the list of free stacks, and will be used
|
||||
// when allocating a new stack, so it has to be marked allocated.
|
||||
valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
|
||||
}
|
||||
stackpoolfree(x, order)
|
||||
unlock(&stackpool[order].item.mu)
|
||||
} else {
|
||||
|
|
@ -496,6 +520,12 @@ func stackfree(stk stack) {
|
|||
if c.stackcache[order].size >= _StackCacheSize {
|
||||
stackcacherelease(c, order)
|
||||
}
|
||||
if valgrindenabled {
|
||||
// x.ptr() is the head of the list of free stacks, and will
|
||||
// be used when allocating a new stack, so it has to be
|
||||
// marked allocated.
|
||||
valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
|
||||
}
|
||||
x.ptr().next = c.stackcache[order].list
|
||||
c.stackcache[order].list = x
|
||||
c.stackcache[order].size += n
|
||||
|
|
@ -583,6 +613,16 @@ func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
|
|||
if stackDebug >= 4 {
|
||||
print(" ", pp, ":", hex(p), "\n")
|
||||
}
|
||||
if valgrindenabled {
|
||||
// p is a pointer on a stack, it is inherently initialized, as
|
||||
// everything on the stack is, but valgrind for _some unknown reason_
|
||||
// sometimes thinks it's uninitialized, and flags operations on p below
|
||||
// as uninitialized. We just initialize it if valgrind thinks its
|
||||
// uninitialized.
|
||||
//
|
||||
// See go.dev/issues/73801.
|
||||
valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p))
|
||||
}
|
||||
if adjinfo.old.lo <= p && p < adjinfo.old.hi {
|
||||
*pp = p + adjinfo.delta
|
||||
if stackDebug >= 3 {
|
||||
|
|
@ -936,6 +976,14 @@ func copystack(gp *g, newsize uintptr) {
|
|||
adjustframe(&u.frame, &adjinfo)
|
||||
}
|
||||
|
||||
if valgrindenabled {
|
||||
if gp.valgrindStackID == 0 {
|
||||
gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
|
||||
} else {
|
||||
valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
|
||||
}
|
||||
}
|
||||
|
||||
// free old stack
|
||||
if stackPoisonCopy != 0 {
|
||||
fillstack(old, 0xfc)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue