go/src/runtime/stack.go


// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):
guard = g->stackguard
frame = function's stack frame size
argsize = size of function arguments (call + return)
stack frame size <= StackSmall:
CMPQ guard, SP
JHI 3(PC)
MOVQ m->morearg, $(argsize << 32)
CALL morestack(SB)
stack frame size > StackSmall but < StackBig:
LEAQ (frame-StackSmall)(SP), R0
CMPQ guard, R0
JHI 3(PC)
MOVQ m->morearg, $(argsize << 32)
CALL morestack(SB)
stack frame size >= StackBig:
MOVQ m->morearg, $((argsize << 32) | frame)
CALL morestack(SB)
The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).
For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
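// A minimal Go sketch of the checks described above, assuming only the
// constants defined below (illustrative; the real checks are emitted as
// machine code in function prologues by the compiler and linker):
//
//	func needsMorestack(sp, stackguard, framesize uintptr) bool {
//		if framesize <= _StackSmall {
//			// Small frames may protrude up to _StackSmall bytes
//			// below the guard, so only SP itself is compared.
//			return sp <= stackguard
//		}
//		if framesize < _StackBig {
//			// Charge the part of the frame beyond _StackSmall
//			// against the guard before comparing.
//			return sp-(framesize-_StackSmall) <= stackguard
//		}
//		// Huge frames skip the comparison and always call morestack.
//		return true
//	}
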
const (
// StackSystem is a number of additional bytes to add
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
// The minimum size of stack used by Go code
_StackMin = 2048
// The minimum stack size to allocate.
// The hackery here rounds FixedStack0 up to a power of 2.
_FixedStack0 = _StackMin + _StackSystem
_FixedStack1 = _FixedStack0 - 1
_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
_FixedStack = _FixedStack6 + 1
// Functions that need frames bigger than this use an extra
// instruction to do the stack split check, to avoid overflow
// in case SP - framesize wraps below zero.
// This value can be no bigger than the size of the unmapped
// space at zero.
_StackBig = 4096
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
//
// The guard leaves enough room for one _StackSmall frame plus
// a _StackLimit chain of NOSPLIT calls plus _StackSystem
// bytes for the OS.
_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
// After a stack split check the SP is allowed to be this
// many bytes below the stack guard. This saves an instruction
// in the checking sequence for tiny frames.
_StackSmall = 128
// The maximum number of bytes that a chain of NOSPLIT
// functions can use.
_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
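// The _FixedStack0 through _FixedStack6 chain above is the usual
// bit-smearing trick for rounding up to the next power of two. A
// standalone sketch of the same computation (illustrative; the real
// values are compile-time constants, and shifting by up to 16 bits is
// enough for the small sizes involved here):
//
//	func roundUpPow2(x uintptr) uintptr {
//		x--
//		x |= x >> 1
//		x |= x >> 2
//		x |= x >> 4
//		x |= x >> 8
//		x |= x >> 16
//		return x + 1
//	}
//
// For example, with _StackMin == 2048 and the Plan 9 _StackSystem of 512,
// roundUpPow2(2560) == 4096; with _StackSystem == 0 the result stays 2048.
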
const (
// stackDebug == 0: no logging
// == 1: logging of per-stack operations
// == 2: logging of per-frame operations
// == 3: logging of per-word updates
// == 4: logging of per-word reads
stackDebug = 0
stackFromSystem = 0 // allocate stacks from system memory instead of the heap
stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
stackNoCache = 0 // disable per-P small stack caches
// check the BP links during traceback.
debugCheckBP = false
)
const (
uintptrMask = 1<<(8*goarch.PtrSize) - 1
// The values below can be stored to g.stackguard0 to force
// the next stack check to fail.
// These are all larger than any real SP.
// Goroutine preemption request.
// 0xfffffade in hex.
stackPreempt = uintptrMask & -1314
// Thread is forking. Causes a split stack check failure.
// 0xfffffb2e in hex.
stackFork = uintptrMask & -1234
// Force a stack movement. Used for debugging.
// 0xfffffeed in hex.
stackForceMove = uintptrMask & -275
// stackPoisonMin is the lowest allowed stack poison value.
stackPoisonMin = uintptrMask & -4096
)
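// As a quick check of the sentinel arithmetic above: -1314 is ...fffffade
// in two's complement, so uintptrMask & -1314 is 0xfffffade on 32-bit
// platforms and 0xfffffffffffffade on 64-bit platforms. Both values sit
// far above any real stack pointer, so storing them in g.stackguard0
// makes the next prologue comparison fail and route into morestack.
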
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
// order = log_2(size/FixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
item stackpoolItem
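// Pad each entry out to a cache line so that the per-order
// mutexes do not share a cache line (avoids false sharing).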
_ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}
type stackpoolItem struct {
_ sys.NotInHeap
mu mutex
span mSpanList
}
// Global pool of large stack spans.
var stackLarge struct {
lock mutex
free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}
func stackinit() {
if _StackCacheSize&_PageMask != 0 {
throw("cache size must be a multiple of page size")
}
for i := range stackpool {
stackpool[i].item.span.init()
lockInit(&stackpool[i].item.mu, lockRankStackpool)
}
for i := range stackLarge.free {
stackLarge.free[i].init()
lockInit(&stackLarge.lock, lockRankStackLarge)
}
}
// stacklog2 returns ⌊log_2(n)⌋.
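// For example, stacklog2(1) == 0, stacklog2(8) == 3, and, because the
// result is floored, stacklog2(9) == 3 as well.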
func stacklog2(n uintptr) int {
log2 := 0
for n > 1 {
n >>= 1
log2++
}
return log2
}
// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order].item.span
s := list.first
lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
if s == nil {
// no free stacks. Allocate another span worth.
s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
if s == nil {
throw("out of memory")
}
if s.allocCount != 0 {
throw("bad allocCount")
}
if s.manualFreeList.ptr() != nil {
throw("bad manualFreeList")
}
osStackAlloc(s)
s.elemsize = _FixedStack << order
for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
x := gclinkptr(s.base() + i)
x.ptr().next = s.manualFreeList
s.manualFreeList = x
}
list.insert(s)
}
x := s.manualFreeList
if x.ptr() == nil {
throw("span has no free stacks")
}
s.manualFreeList = x.ptr().next
s.allocCount++
if s.manualFreeList.ptr() == nil {
// all stacks in s are allocated.
list.remove(s)
}
return x
}
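// Sizing note (illustrative, assuming the usual 32 KiB _StackCacheSize and
// 2 KiB _FixedStack): the span carved up above holds 16 free stacks at
// order 0, 8 at order 1, and so on, halving with each order.
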
// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
s := spanOfUnchecked(uintptr(x))
if s.state.get() != mSpanManual {
throw("freeing stack not in a stack span")
}
if s.manualFreeList.ptr() == nil {
// s will now have a free stack
stackpool[order].item.span.insert(s)
}
x.ptr().next = s.manualFreeList
s.manualFreeList = x
s.allocCount--
if gcphase == _GCoff && s.allocCount == 0 {
// Span is completely free. Return it to the heap
// immediately if we're sweeping.
//
// If GC is active, we delay the free until the end of
// GC to avoid the following type of situation:
//
// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
// 2) The stack that pointer points to is copied
// 3) The old stack is freed
// 4) The containing span is marked free
// 5) GC attempts to mark the SudoG.elem pointer. The
// marking fails because the pointer looks like a
// pointer into a free span.
//
// By not freeing, we prevent step #4 until GC is done.
stackpool[order].item.span.remove(s)
s.manualFreeList = 0
osStackFree(s)
mheap_.freeManual(s, spanAllocStack)
}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherefill order=", order, "\n")
}
// Grab some stacks from the global cache.
// Grab half of the allowed capacity (to prevent thrashing).
var list gclinkptr
var size uintptr
lock(&stackpool[order].item.mu)
for size < _StackCacheSize/2 {
x := stackpoolalloc(order)
x.ptr().next = list
list = x
size += _FixedStack << order
}
unlock(&stackpool[order].item.mu)
c.stackcache[order].list = list
c.stackcache[order].size = size
}
//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherelease order=", order, "\n")
}
x := c.stackcache[order].list
size := c.stackcache[order].size
lock(&stackpool[order].item.mu)
for size > _StackCacheSize/2 {
y := x.ptr().next
stackpoolfree(x, order)
x = y
size -= _FixedStack << order
}
unlock(&stackpool[order].item.mu)
c.stackcache[order].list = x
c.stackcache[order].size = size
}
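// Together, stackcacherefill and stackcacherelease keep each per-P cache
// hovering around _StackCacheSize/2: refill tops an empty list up to half
// capacity and release drains an over-full list back down to half, so a P
// that alternates between allocating and freeing stacks rarely needs the
// global stackpool lock.
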
//go:systemstack
func stackcache_clear(c *mcache) {
if stackDebug >= 1 {
print("stackcache clear\n")
}
for order := uint8(0); order < _NumStackOrders; order++ {
lock(&stackpool[order].item.mu)
x := c.stackcache[order].list
for x.ptr() != nil {
y := x.ptr().next
stackpoolfree(x, order)
x = y
}
c.stackcache[order].list = 0
c.stackcache[order].size = 0
unlock(&stackpool[order].item.mu)
}
}
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
thisg := getg()
if thisg != thisg.m.g0 {
throw("stackalloc not on scheduler stack")
}
if n&(n-1) != 0 {
throw("stack size not a power of 2")
}
if stackDebug >= 1 {
print("stackalloc ", n, "\n")
}
if debug.efence != 0 || stackFromSystem != 0 {
n = uint32(alignUp(uintptr(n), physPageSize))
v := sysAlloc(uintptr(n), &memstats.stacks_sys)
if v == nil {
throw("out of memory (stackalloc)")
}
return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
// Small stacks are allocated with a fixed-size free-list allocator.
// If we need a stack of a bigger size, we fall back on allocating
// a dedicated span.
var v unsafe.Pointer
if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
order++
n2 >>= 1
}
var x gclinkptr
if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
// thisg.m.p == 0 can happen in the guts of exitsyscall
// or procresize. Just get a stack from the global pool.
// Also don't touch stackcache during gc
// as it's flushed concurrently.
lock(&stackpool[order].item.mu)
x = stackpoolalloc(order)
unlock(&stackpool[order].item.mu)
} else {
c := thisg.m.p.ptr().mcache
x = c.stackcache[order].list
if x.ptr() == nil {
stackcacherefill(c, order)
x = c.stackcache[order].list
}
c.stackcache[order].list = x.ptr().next
c.stackcache[order].size -= uintptr(n)
}
v = unsafe.Pointer(x)
} else {
var s *mspan
npage := uintptr(n) >> _PageShift
log2npage := stacklog2(npage)
// Try to get a stack from the large stack cache.
lock(&stackLarge.lock)
if !stackLarge.free[log2npage].isEmpty() {
s = stackLarge.free[log2npage].first
stackLarge.free[log2npage].remove(s)
}
unlock(&stackLarge.lock)
lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
if s == nil {
// Allocate a new stack from the heap.
s = mheap_.allocManual(npage, spanAllocStack)
if s == nil {
throw("out of memory")
}
osStackAlloc(s)
s.elemsize = uintptr(n)
}
v = unsafe.Pointer(s.base())
}
if raceenabled {
racemalloc(v, uintptr(n))
}
if msanenabled {
msanmalloc(v, uintptr(n))
}
if asanenabled {
asanunpoison(v, uintptr(n))
}
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
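// Worked example (illustrative, assuming 2 KiB _FixedStack, 4 stack
// orders, a 32 KiB _StackCacheSize, and 8 KiB pages): a request for
// n == 8192 takes the small-stack path with order 2 and is normally
// served from the per-P cache, while n == 65536 takes the large-stack
// path with log2npage == stacklog2(65536>>_PageShift) == 3.
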
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
gp := getg()
v := unsafe.Pointer(stk.lo)
n := stk.hi - stk.lo
if n&(n-1) != 0 {
throw("stack not a power of 2")
}
if stk.lo+n < stk.hi {
throw("bad stack size")
}
if stackDebug >= 1 {
println("stackfree", v, n)
memclrNoHeapPointers(v, n) // for testing, clobber stack data
}
if debug.efence != 0 || stackFromSystem != 0 {
if debug.efence != 0 || stackFaultOnFree != 0 {
sysFault(v, n)
} else {
sysFree(v, n, &memstats.stacks_sys)
}
return
}
if msanenabled {
msanfree(v, n)
}
if asanenabled {
asanpoison(v, n)
}
if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
for n2 > _FixedStack {
order++
n2 >>= 1
}
x := gclinkptr(v)
if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
lock(&stackpool[order].item.mu)
stackpoolfree(x, order)
unlock(&stackpool[order].item.mu)
} else {
c := gp.m.p.ptr().mcache
if c.stackcache[order].size >= _StackCacheSize {
stackcacherelease(c, order)
}
x.ptr().next = c.stackcache[order].list
c.stackcache[order].list = x
c.stackcache[order].size += n
}
} else {
s := spanOfUnchecked(uintptr(v))
if s.state.get() != mSpanManual {
println(hex(s.base()), v)
throw("bad span state")
}
if gcphase == _GCoff {
// Free the stack immediately if we're
// sweeping.
osStackFree(s)
mheap_.freeManual(s, spanAllocStack)
} else {
// If the GC is running, we can't return a
// stack span to the heap because it could be
// reused as a heap span, and this state
// change would race with GC. Add it to the
// large stack cache instead.
log2npage := stacklog2(s.npages)
lock(&stackLarge.lock)
stackLarge.free[log2npage].insert(s)
unlock(&stackLarge.lock)
}
}
}
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var maxstackceiling = maxstacksize
var ptrnames = []string{
0: "scalar",
1: "ptr",
}
// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | return address |
// +------------------+
// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+
// | return address |
// +------------------+ <- frame->sp
type adjustinfo struct {
old stack
delta uintptr // ptr distance from old to new stack (newbase - oldbase)
cache pcvalueCache
// sghi is the highest sudog.elem on the stack.
sghi uintptr
}
// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
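// For example (hypothetical addresses): with an old stack [0x1000, 0x3000)
// copied to [0x5000, 0x7000), delta is 0x7000-0x3000 = 0x4000, so a slot
// holding 0x2100 is rewritten to 0x6100, while pointers outside the old
// range are left untouched.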
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
pp := (*uintptr)(vpp)
p := *pp
if stackDebug >= 4 {
print(" ", pp, ":", hex(p), "\n")
}
if adjinfo.old.lo <= p && p < adjinfo.old.hi {
*pp = p + adjinfo.delta
if stackDebug >= 3 {
print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
}
}
}
// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
n int32 // # of bits
bytedata *uint8
}
// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
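// Bit i lives in byte i/8, at bit position i%8 within that byte (low bits
// first).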
b := *(addb(bv.bytedata, i/8))
return (b >> (i % 8)) & 1
}
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
minp := adjinfo.old.lo
maxp := adjinfo.old.hi
delta := adjinfo.delta
num := uintptr(bv.n)
// If this frame might contain channel receive slots, use CAS
// to adjust pointers. If the slot hasn't been received into
// yet, it may contain stack pointers and a concurrent send
// could race with adjusting those pointers. (The sent value
// itself can never contain stack pointers.)
useCAS := uintptr(scanp) < adjinfo.sghi
for i := uintptr(0); i < num; i += 8 {
if stackDebug >= 4 {
for j := uintptr(0); j < 8; j++ {
print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
}
}
b := *(addb(bv.bytedata, i/8))
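// Walk the set bits of b from lowest to highest: Ctz8 picks out the next
// pointer slot in this group of 8, and b &= b-1 clears that bit so the
// loop ends once every marked slot has been visited.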
for b != 0 {
j := uintptr(sys.Ctz8(b))
b &= b - 1
pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
retry:
p := *pp
if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
// Looks like a junk value in a pointer slot.
// Live analysis wrong?
getg().m.traceback = 2
print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
throw("invalid pointer found on stack")
}
if minp <= p && p < maxp {
if stackDebug >= 3 {
print("adjust ptr ", hex(p), " ", funcname(f), "\n")
}
if useCAS {
ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
goto retry
}
} else {
*pp = p + delta
}
}
}
}
}
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
adjinfo := (*adjustinfo)(arg)
if frame.continpc == 0 {
// Frame is dead.
return true
}
f := frame.fn
if stackDebug >= 2 {
print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
}
if f.funcID == funcID_systemstack_switch {
// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
// We will allow it to be copied even though we don't
// have full GC info for it (because it is written in asm).
return true
}
locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
// Adjust local variables if stack frame has been allocated.
if locals.n > 0 {
size := uintptr(locals.n) * goarch.PtrSize
adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
}
// Adjust saved base pointer if there is one.
// TODO what about arm64 frame pointer adjustment?
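// A two-word gap between varp and argp (return PC plus saved RBP) means
// this frame saved a base pointer at varp; it normally points back into
// the old stack and is adjusted below like any other stack pointer.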
if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
if stackDebug >= 3 {
print(" saved bp\n")
}
if debugCheckBP {
// Frame pointers should always point to the next higher frame on
// the Go stack (or be nil, for the top frame on the stack).
bp := *(*uintptr)(unsafe.Pointer(frame.varp))
if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
println("runtime: found invalid frame pointer")
print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
throw("bad frame pointer")
}
}
adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
}
// Adjust arguments.
if args.n > 0 {
if stackDebug >= 3 {
print(" args\n")
}
adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
}
// Adjust pointers in all stack objects (whether they are live or not).
// See comments in mgcmark.go:scanframeworker.
if frame.varp != 0 {
for i := range objs {
obj := &objs[i]
off := obj.off
base := frame.varp // locals base pointer
if off >= 0 {
base = frame.argp // arguments and return values base pointer
}
p := base + uintptr(off)
if p < frame.sp {
// Object hasn't been allocated in the frame yet.
// (Happens when the stack bounds check fails and
// we call into morestack.)
continue
}
ptrdata := obj.ptrdata()
gcdata := obj.gcdata()
var s *mspan
if obj.useGCProg() {
// See comments in mgcmark.go:scanstack
s = materializeGCProg(ptrdata, gcdata)
gcdata = (*byte)(unsafe.Pointer(s.startAddr))
}
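// Walk the object's pointer bitmap one word at a time: bit i/PtrSize of
// gcdata says whether the word at offset i in the object may hold a
// pointer; ptrdata covers only the prefix of the object that can
// contain pointers.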
for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
adjustpointer(adjinfo, unsafe.Pointer(p+i))
}
}
if s != nil {
dematerializeGCProg(s)
}
}
}
return true
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
if !framepointer_enabled {
return
}
if debugCheckBP {
bp := gp.sched.bp
if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
println("runtime: found invalid top frame pointer")
print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
throw("bad top frame pointer")
}
}
adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
// Adjust pointers in the Defer structs.
// We need to do this first because we need to adjust the
// defer.link fields so we always work on the new stack.
adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
for d := gp._defer; d != nil; d = d.link {
adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
adjustpointer(adjinfo, unsafe.Pointer(&d.link))
adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
// Panics are on stack and already adjusted.
// Update pointer to head of list in G.
adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
// the data elements pointed to by a SudoG structure
// might be in the stack.
for s := gp.waiting; s != nil; s = s.waitlink {
adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
}
}
func fillstack(stk stack, b byte) {
for p := stk.lo; p < stk.hi; p++ {
*(*byte)(unsafe.Pointer(p)) = b
}
}
func findsghi(gp *g, stk stack) uintptr {
var sghi uintptr
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
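// sg.elem is the stack slot a channel operation may read or write; the
// slot ends elemsize bytes later, and sghi tracks the highest such end
// address that falls within this stack.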
p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
if stk.lo <= p && p < stk.hi && p > sghi {
sghi = p
}
}
return sghi
}
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
if gp.waiting == nil {
return 0
}
// Lock channels to prevent concurrent send/receive.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc {
// There is a ranking cycle here between gscan bit and
// hchan locks. Normally, we only allow acquiring hchan
// locks and then getting a gscan bit. In this case, we
// already have the gscan bit. We allow acquiring hchan
// locks here as a special case, since a deadlock can't
// happen because the G involved must already be
// suspended. So, we get a special hchan lock rank here
// that is lower than gscan, but doesn't allow acquiring
// any other locks other than hchan.
lockWithRank(&sg.c.lock, lockRankHchanLeaf)
}
lastc = sg.c
}
// Adjust sudogs.
adjustsudogs(gp, adjinfo)
// Copy the part of the stack the sudogs point into
// while holding the lock to prevent races on
// send/receive slots.
var sgsize uintptr
if adjinfo.sghi != 0 {
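// The used part of the old stack is [old.hi-used, old.hi), and anything
// a concurrent channel operation can touch lies below sghi. Copy
// [old.hi-used, sghi) here while the channel locks are held; copystack
// copies the remainder above sghi without them.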
oldBot := adjinfo.old.hi - used
newBot := oldBot + adjinfo.delta
sgsize = adjinfo.sghi - oldBot
memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
}
// Unlock channels.
lastc = nil
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc {
unlock(&sg.c.lock)
}
lastc = sg.c
}
return sgsize
}
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
old := gp.stack
if old.lo == 0 {
throw("nil stackbase")
}
used := old.hi - gp.sched.sp
// Add just the difference to gcController.addScannableStack.
// g0 stacks never move, so this will never account for them.
// It's also fine if we have no P, addScannableStack can deal with
// that case.
gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))
// allocate new stack
new := stackalloc(uint32(newsize))
if stackPoisonCopy != 0 {
fillstack(new, 0xfd)
}
if stackDebug >= 1 {
print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
}
// Compute adjustment.
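// Stacks grow down and are copied so that frames keep their offsets from
// the high end, so the delta between the two hi bounds applies uniformly
// to every pointer that lands in the old range.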
var adjinfo adjustinfo
adjinfo.old = old
adjinfo.delta = new.hi - old.hi
// Adjust sudogs, synchronizing with channel ops if necessary.
ncopy := used
if !gp.activeStackChans {
if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
// It's not safe for someone to shrink this stack while we're actively
// parking on a channel, but it is safe to grow since we do that
// ourselves and explicitly don't want to synchronize with channels
// since we could self-deadlock.
throw("racy sudog adjustment due to parking on channel")
}
adjustsudogs(gp, &adjinfo)
} else {
// sudogs may be pointing into the stack and gp has
// released channel locks, so other goroutines could
// be writing to gp's stack. Find the highest such
// pointer so we can handle everything there and below
// carefully. (This shouldn't be far from the bottom
// of the stack, so there's little cost in handling
// everything below it carefully.)
adjinfo.sghi = findsghi(gp, old)
// Synchronize with channel ops and copy the part of
// the stack they may interact with.
ncopy -= syncadjustsudogs(gp, used, &adjinfo)
}
// Copy the stack (or the rest of it) to the new location
memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
// Adjust remaining structures that have pointers into stacks.
// We have to do most of these before we traceback the new
// stack because gentraceback uses them.
adjustctxt(gp, &adjinfo)
adjustdefers(gp, &adjinfo)
adjustpanics(gp, &adjinfo)
if adjinfo.sghi != 0 {
adjinfo.sghi += adjinfo.delta
}
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
gp.stktopsp += adjinfo.delta
// Adjust pointers in the new stack.
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
}
stackfree(old)
}
// round x up to a power of 2.
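// For example, round2(1) == 1, round2(5) == 8, and round2(8) == 8.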
func round2(x int32) int32 {
s := uint(0)
for 1<<s < x {
s++
}
return 1 << s
}
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
thisg := getg()
// TODO: double check all gp. shouldn't be getg().
if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
throw("stack growth after fork")
}
if thisg.m.morebuf.g.ptr() != thisg.m.curg {
print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
morebuf := thisg.m.morebuf
traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
throw("runtime: wrong goroutine in newstack")
}
gp := thisg.m.curg
if thisg.m.curg.throwsplit {
// Update syscallsp, syscallpc in case traceback uses them.
morebuf := thisg.m.morebuf
gp.syscallsp = morebuf.sp
gp.syscallpc = morebuf.pc
pcname, pcoff := "(unknown)", uintptr(0)
f := findfunc(gp.sched.pc)
if f.valid() {
pcname = funcname(f)
pcoff = gp.sched.pc - f.entry()
}
print("runtime: newstack at ", pcname, "+", hex(pcoff),
" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
thisg.m.traceback = 2 // Include runtime frames
traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
throw("runtime: stack split at bad time")
}
morebuf := thisg.m.morebuf
thisg.m.morebuf.pc = 0
thisg.m.morebuf.lr = 0
thisg.m.morebuf.sp = 0
thisg.m.morebuf.g = 0
// NOTE: stackguard0 may change underfoot, if another thread
// is about to try to preempt gp. Read it just once and use that same
// value now and below.
stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
// Be conservative about where we preempt.
// We are interested in preempting user Go code, not runtime code.
// If we're holding locks, mallocing, or preemption is disabled, don't
// preempt.
// This check is very early in newstack so that even the status change
// from Grunning to Gwaiting and back doesn't happen in this case.
// That status change by itself can be viewed as a small preemption,
// because the GC might change Gwaiting to Gscanwaiting, and then
// this goroutine has to wait for the GC to finish before continuing.
// If the GC is in some way dependent on this goroutine (for example,
// it needs a lock held by the goroutine), that small preemption turns
// into a real deadlock.
preempt := stackguard0 == stackPreempt
if preempt {
if !canPreemptM(thisg.m) {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
gp.stackguard0 = gp.stack.lo + _StackGuard
gogo(&gp.sched) // never return
}
}
if gp.stack.lo == 0 {
throw("missing stack in newstack")
}
sp := gp.sched.sp
if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
// The call to morestack cost a word.
sp -= goarch.PtrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
}
if sp < gp.stack.lo {
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
throw("runtime: split stack overflow")
}
if preempt {
if gp == thisg.m.g0 {
throw("runtime: preempt g0")
}
if thisg.m.p == 0 && thisg.m.locks == 0 {
throw("runtime: g is running but p is not")
}
if gp.preemptShrink {
// We're at a synchronous safe point now, so
// do the pending stack shrink.
gp.preemptShrink = false
shrinkstack(gp)
}
if gp.preemptStop {
preemptPark(gp) // never returns
}
// Act like goroutine called runtime.Gosched.
gopreempt_m(gp) // never return
}
// Allocate a bigger segment and move the stack.
oldsize := gp.stack.hi - gp.stack.lo
newsize := oldsize * 2
// Make sure we grow at least as much as needed to fit the new frame.
// (This is just an optimization - the caller of morestack will
// recheck the bounds on return.)
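// For example (hypothetical sizes): growing an 8 KiB stack with 7 KiB in
// use for a callee whose worst-case frame plus guard needs 10 KiB, the
// initial doubling to 16 KiB leaves only 9 KiB free, so the loop below
// doubles once more to 32 KiB.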
if f := findfunc(gp.sched.pc); f.valid() {
max := uintptr(funcMaxSPDelta(f))
needed := max + _StackGuard
used := gp.stack.hi - gp.sched.sp
for newsize-used < needed {
newsize *= 2
}
}
if stackguard0 == stackForceMove {
// Forced stack movement used for debugging.
// Don't double the stack (or we may quickly run out
// if this is done repeatedly).
newsize = oldsize
}
if newsize > maxstacksize || newsize > maxstackceiling {
if maxstacksize < maxstackceiling {
print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
} else {
print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
}
print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
throw("stack overflow")
}
// The goroutine must be executing in order to call newstack,
// so it must be Grunning (or Gscanrunning).
casgstatus(gp, _Grunning, _Gcopystack)
// The concurrent GC will not scan the stack while we are doing the copy since
// the gp is in a Gcopystack status.
copystack(gp, newsize)
if stackDebug >= 1 {
print("stack grow done\n")
}
casgstatus(gp, _Gcopystack, _Grunning)
gogo(&gp.sched)
}
//go:nosplit
func nilfunc() {
*(*uint8)(nil) = 0
}
// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
var fn unsafe.Pointer
if fv != nil {
fn = unsafe.Pointer(fv.fn)
} else {
fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
}
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
// We can't copy the stack if we're in a syscall.
// The syscall might have pointers into the stack and
// often we don't have precise pointer maps for the innermost
// frames.
//
// We also can't copy the stack if we're at an asynchronous
// safe-point because we don't have precise pointer maps for
// all frames.
//
// We also can't *shrink* the stack in the window between the
// goroutine calling gopark to park on a channel and
// gp.activeStackChans being set.
return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
}
// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
if gp.stack.lo == 0 {
throw("missing stack in shrinkstack")
}
if s := readgstatus(gp); s&_Gscan == 0 {
// We don't own the stack via _Gscan. We could still
// own it if this is our own user G and we're on the
// system stack.
if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
// We don't own the stack.
throw("bad status in shrinkstack")
}
}
if !isShrinkStackSafe(gp) {
throw("shrinkstack at bad time")
}
// Check for self-shrinks while in a libcall. These may have
// pointers into the stack disguised as uintptrs, but these
// code paths should all be nosplit.
if gp == getg().m.curg && gp.m.libcallsp != 0 {
throw("shrinking stack in libcall")
}
if debug.gcshrinkstackoff > 0 {
return
}
f := findfunc(gp.startpc)
if f.valid() && f.funcID == funcID_gcBgMarkWorker {
// We're not allowed to shrink the gcBgMarkWorker
// stack (see gcBgMarkWorker for explanation).
return
}
oldsize := gp.stack.hi - gp.stack.lo
newsize := oldsize / 2
// Don't shrink the allocation below the minimum-sized stack
// allocation.
if newsize < _FixedStack {
return
}
// Compute how much of the stack is currently in use and only
// shrink the stack if gp is using less than a quarter of its
// current stack. The currently used stack includes everything
// down to the SP plus the stack guard space that ensures
// there's room for nosplit functions.
avail := gp.stack.hi - gp.stack.lo
if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
return
}
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
copystack(gp, newsize)
}
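
// The following is an illustrative sketch added for exposition, not part of
// the runtime: the shrink-eligibility test above restated over plain
// integers. The name and signature are hypothetical. A stack is shrunk only
// when the bytes in use, including the guard space reserved for nosplit
// functions, are less than a quarter of the current allocation.
func exampleShrinkEligible(hi, lo, sp, guard uintptr) bool {
	avail := hi - lo        // current stack allocation
	used := hi - sp + guard // live frames plus nosplit headroom
	return used < avail/4   // mirrors the used >= avail/4 early return above
}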
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
// Scan stack pools for empty stack spans.
for order := range stackpool {
lock(&stackpool[order].item.mu)
list := &stackpool[order].item.span
for s := list.first; s != nil; {
next := s.next
if s.allocCount == 0 {
list.remove(s)
s.manualFreeList = 0
osStackFree(s)
mheap_.freeManual(s, spanAllocStack)
}
s = next
}
unlock(&stackpool[order].item.mu)
}
// Free large stack spans.
lock(&stackLarge.lock)
for i := range stackLarge.free {
for s := stackLarge.free[i].first; s != nil; {
next := s.next
stackLarge.free[i].remove(s)
osStackFree(s)
mheap_.freeManual(s, spanAllocStack)
s = next
}
}
unlock(&stackLarge.lock)
}
// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
targetpc := frame.continpc
if targetpc == 0 {
// Frame is dead. Return empty bitvectors.
return
}
f := frame.fn
pcdata := int32(-1)
if targetpc != f.entry() {
// Back up to the CALL. If we're at the function entry
// point, we want to use the entry map (-1), even if
// the first instruction of the function changes the
// stack map.
targetpc--
pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
}
if pcdata == -1 {
// We do not have a valid pcdata value but there might be a
// stackmap for this function. It is likely that we are looking
		// at the function prologue; assume so and hope for the best.
pcdata = 0
}
// Local variables.
size := frame.varp - frame.sp
var minsize uintptr
switch goarch.ArchFamily {
case goarch.ARM64:
minsize = sys.StackAlign
default:
minsize = sys.MinFrameSize
}
if size > minsize {
stackid := pcdata
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
if stkmap == nil || stkmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
throw("missing stackmap")
}
// If nbit == 0, there's no work to do.
if stkmap.nbit > 0 {
if stackid < 0 || stackid >= stkmap.n {
// don't know where we are
print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
throw("bad symbol table")
}
locals = stackmapdata(stkmap, stackid)
if stackDebug >= 3 && debug {
print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
}
} else if stackDebug >= 3 && debug {
print(" no locals to adjust\n")
}
}
// Arguments.
if frame.arglen > 0 {
if frame.argmap != nil {
// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
// In this case, arglen specifies how much of the args section is actually live.
// (It could be either all the args + results, or just the args.)
args = *frame.argmap
n := int32(frame.arglen / goarch.PtrSize)
if n < args.n {
args.n = n // Don't use more of the arguments than arglen.
}
} else {
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stackmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
throw("bad symbol table")
}
if stackmap.nbit > 0 {
args = stackmapdata(stackmap, pcdata)
}
}
}
// stack objects.
if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") &&
unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
// We don't actually use argmap in this case, but we need to fake the stack object
// record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
// This offset matches the assembly code on amd64 and arm64.
objs = methodValueCallFrameObjs[:]
} else {
p := funcdata(f, _FUNCDATA_StackObjects)
if p != nil {
n := *(*uintptr)(p)
p = add(p, goarch.PtrSize)
r0 := (*stackObjectRecord)(noescape(p))
objs = unsafe.Slice(r0, int(n))
// Note: the noescape above is needed to keep
// getStackMap from "leaking param content:
// frame". That leak propagates up to getgcmask, then
// GCMask, then verifyGCInfo, which converts the stack
// gcinfo tests into heap gcinfo tests :(
}
}
return
}
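
// Illustrative sketch added for exposition, not part of the runtime: how a
// consumer of the locals/args bitvectors returned above can test whether
// stack word i holds a pointer. Runtime code elsewhere uses the bitvector
// ptrbit helper; this hypothetical version assumes the bitmap bytes have
// been copied into a plain byte slice, with bit i stored at bit i%8 of
// byte i/8.
func exampleIsPtrWord(bitmap []byte, i int) bool {
	return bitmap[i/8]>>(uint(i)%8)&1 != 0
}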
var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stkobjinit
func stkobjinit() {
var abiRegArgsEface any = abi.RegArgs{}
abiRegArgsType := efaceOf(&abiRegArgsEface)._type
if abiRegArgsType.kind&kindGCProg != 0 {
throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
}
// Set methodValueCallFrameObjs[0].gcdataoff so that
// stackObjectRecord.gcdata() will work correctly with it.
ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
var mod *moduledata
for datap := &firstmoduledata; datap != nil; datap = datap.next {
if datap.gofunc <= ptr && ptr < datap.end {
mod = datap
break
}
}
if mod == nil {
throw("methodValueCallFrameObjs is not in a module")
}
methodValueCallFrameObjs[0] = stackObjectRecord{
off: -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
size: int32(abiRegArgsType.size),
_ptrdata: int32(abiRegArgsType.ptrdata),
gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
}
}
// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
// offset in frame
// if negative, offset from varp
// if non-negative, offset from argp
off int32
size int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC prog is used
gcdataoff uint32 // offset to gcdata from moduledata.rodata
}
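
// Illustrative sketch added for exposition, not part of the runtime: how the
// off field's sign convention documented above resolves to an address. The
// helper name and signature are hypothetical; the real lookup lives in the
// GC's frame-scanning code.
func exampleStackObjAddr(varp, argp uintptr, off int32) uintptr {
	base := varp // negative off: relative to the locals pointer (varp)
	if off >= 0 {
		base = argp // non-negative off: relative to the arguments pointer (argp)
	}
	// Converting a negative int32 to uintptr wraps, so the addition below
	// effectively subtracts from varp.
	return base + uintptr(off)
}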
func (r *stackObjectRecord) useGCProg() bool {
return r._ptrdata < 0
}
func (r *stackObjectRecord) ptrdata() uintptr {
x := r._ptrdata
if x < 0 {
return uintptr(-x)
}
return uintptr(x)
}
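
// Illustrative sketch added for exposition, not part of the runtime or the
// compiler: the encode side of the _ptrdata convention that useGCProg and
// ptrdata above read back. A negative stored value marks a type whose
// pointer layout needs a GC program rather than a plain bitmap.
func exampleEncodePtrdata(ptrdata uintptr, usesGCProg bool) int32 {
	if usesGCProg {
		return -int32(ptrdata)
	}
	return int32(ptrdata)
}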
// gcdata returns the pointer map or GC prog of the type.
func (r *stackObjectRecord) gcdata() *byte {
ptr := uintptr(unsafe.Pointer(r))
var mod *moduledata
for datap := &firstmoduledata; datap != nil; datap = datap.next {
if datap.gofunc <= ptr && ptr < datap.end {
mod = datap
break
}
}
// If you get a panic here due to a nil mod,
// you may have made a copy of a stackObjectRecord.
// You must use the original pointer.
res := mod.rodata + uintptr(r.gcdataoff)
return (*byte)(unsafe.Pointer(res))
}
// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
throw("attempt to execute system stack code on user stack")
}
// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = _FixedStack
func gcComputeStartingStackSize() {
if debug.adaptivestackstart == 0 {
return
}
// For details, see the design doc at
// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
// The basic algorithm is to track the average size of stacks
// and start goroutines with stack equal to that average size.
// Starting at the average size uses at most 2x the space that
// an ideal algorithm would have used.
// This is just a heuristic to avoid excessive stack growth work
// early in a goroutine's lifetime. See issue 18138. Stacks that
// are allocated too small can still grow, and stacks allocated
// too large can still shrink.
var scannedStackSize uint64
var scannedStacks uint64
for _, p := range allp {
scannedStackSize += p.scannedStackSize
scannedStacks += p.scannedStacks
// Reset for next time
p.scannedStackSize = 0
p.scannedStacks = 0
}
if scannedStacks == 0 {
startingStackSize = _FixedStack
return
}
avg := scannedStackSize/scannedStacks + _StackGuard
// Note: we add _StackGuard to ensure that a goroutine that
// uses the average space will not trigger a growth.
if avg > uint64(maxstacksize) {
avg = uint64(maxstacksize)
}
if avg < _FixedStack {
avg = _FixedStack
}
// Note: maxstacksize fits in 30 bits, so avg also does.
startingStackSize = uint32(round2(int32(avg)))
}
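
// Illustrative sketch added for exposition, not part of the runtime: the
// adaptive sizing arithmetic above with plain numbers. The helper and its
// parameters are hypothetical. With, say, 300 KiB scanned across 100 stacks
// and a guard of 928 bytes (the exact value of _StackGuard varies by
// platform), the average is 3072+928 = 4000 bytes, which rounds up to a
// 4096-byte starting stack.
func exampleStartingStackSize(scannedBytes, scannedStacks, guard, minSize, maxSize uint64) uint64 {
	if scannedStacks == 0 {
		return minSize
	}
	avg := scannedBytes/scannedStacks + guard
	if avg > maxSize {
		avg = maxSize
	}
	if avg < minSize {
		avg = minSize
	}
	// Round up to the next power of two, as round2 does for the real value.
	p := uint64(1)
	for p < avg {
		p <<= 1
	}
	return p
}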