// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// defined constants
const (
// G status
//
// Beyond indicating the general state of a G, the G status
// acts like a lock on the goroutine's stack (and hence its
// ability to execute user code).
//
// If you add to this list, add to the list
// of "okay during garbage collection" status
// in mgcmark.go too.
// _Gidle means this goroutine was just allocated and has not
// yet been initialized.
_Gidle = iota // 0
// _Grunnable means this goroutine is on a run queue. It is
// not currently executing user code. The stack is not owned.
_Grunnable // 1
// _Grunning means this goroutine may execute user code. The
// stack is owned by this goroutine. It is not on a run queue.
// It is assigned an M and a P.
_Grunning // 2
// _Gsyscall means this goroutine is executing a system call.
// It is not executing user code. The stack is owned by this
// goroutine. It is not on a run queue. It is assigned an M.
_Gsyscall // 3
// _Gwaiting means this goroutine is blocked in the runtime.
// It is not executing user code. It is not on a run queue,
// but should be recorded somewhere (e.g., a channel wait
// queue) so it can be ready()d when necessary. The stack is
// not owned *except* that a channel operation may read or
// write parts of the stack under the appropriate channel
// lock. Otherwise, it is not safe to access the stack after a
// goroutine enters _Gwaiting (e.g., it may get moved).
_Gwaiting // 4
// _Gmoribund_unused is currently unused, but hardcoded in gdb
// scripts.
_Gmoribund_unused // 5
// _Gdead means this goroutine is currently unused. It may be
// just exited, on a free list, or just being initialized. It
// is not executing user code. It may or may not have a stack
// allocated. The G and its stack (if any) are owned by the M
// that is exiting the G or that obtained the G from the free
// list.
_Gdead // 6
// _Genqueue_unused is currently unused.
_Genqueue_unused // 7
// _Gcopystack means this goroutine's stack is being moved. It
// is not executing user code and is not on a run queue. The
// stack is owned by the goroutine that put it in _Gcopystack.
_Gcopystack // 8
// _Gscan combined with one of the above states other than
// _Grunning indicates that GC is scanning the stack. The
// goroutine is not executing user code and the stack is owned
// by the goroutine that set the _Gscan bit.
//
// _Gscanrunning is different: it is used to briefly block
// state transitions while GC signals the G to scan its own
// stack. This is otherwise like _Grunning.
//
// atomicstatus&~Gscan gives the state the goroutine will
// return to when the scan completes.
_Gscan = 0x1000
_Gscanrunnable = _Gscan + _Grunnable // 0x1001
_Gscanrunning = _Gscan + _Grunning // 0x1002
_Gscansyscall = _Gscan + _Gsyscall // 0x1003
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004
)
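// A minimal sketch (not a runtime API) of how the _Gscan bit composes with a
// base status, per the comment above: masking the bit off recovers the state
// the goroutine will return to when the scan completes. isScanStatus is a
// hypothetical helper name used only for illustration.
func isScanStatus(status uint32) (scanning bool, base uint32) {
	return status&_Gscan != 0, status &^ _Gscan
}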
const (
// P status
_Pidle = iota
_Prunning // Only this P is allowed to change from _Prunning.
_Psyscall
_Pgcstop
_Pdead
)
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
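// A minimal usage sketch, assuming the runtime's lock/unlock primitives: a
// zeroed mutex is ready to use, so guarding shared state needs no explicit
// initialization. hypotheticalCounter and bumpHypotheticalCounter are
// illustrative names only, not part of the runtime.
var hypotheticalCounter struct {
	lock mutex
	n    int64
}

func bumpHypotheticalCounter() {
	lock(&hypotheticalCounter.lock)
	hypotheticalCounter.n++
	unlock(&hypotheticalCounter.lock)
}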
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
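// A minimal sketch of the one-time event protocol described above:
// noteclear before any sleep or wakeup, exactly one sleeper and exactly one
// waker per cleared note. hypotheticalEvent and the two functions are
// illustrative names only; notesleep is normally called on g0 (notetsleepg
// is the user-g variant).
var hypotheticalEvent note

func waitForHypotheticalEvent() {
	noteclear(&hypotheticalEvent)
	notesleep(&hypotheticalEvent) // returns once notewakeup has been called
}

func signalHypotheticalEvent() {
	notewakeup(&hypotheticalEvent) // at most once per noteclear
}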
type funcval struct {
fn uintptr
// variable-size, fn-specific data here
}
type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}
func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
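// A minimal sketch of efaceOf in use: reading the type and data words of an
// empty interface. inspectHypotheticalEface is an illustrative name, not a
// runtime function.
func inspectHypotheticalEface(i interface{}) (*_type, unsafe.Pointer) {
	e := efaceOf(&i)
	return e._type, e.data
}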
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr
//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
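// A minimal sketch of the intended pattern, assuming a runnable G being
// published into a slot manipulated without a P: set stores the pointer with
// no write barrier, and cas publishes it atomically. publishHypotheticalG is
// an illustrative name, not a runtime function.
func publishHypotheticalG(slot *guintptr, gp *g) bool {
	var newg guintptr
	newg.set(gp)             // plain store, no write barrier
	return slot.cas(0, newg) // succeeds only if the slot was empty (nil)
}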
// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
type puintptr uintptr
//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
type muintptr uintptr
//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
type gobuf struct {
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
//
// ctxt is unusual with respect to GC: it may be a
// heap-allocated funcval, so writes to it require a write barrier,
// but gobuf needs to be cleared from assembly. We take
// advantage of the fact that the only path that uses a
// non-nil ctxt is morestack. As a result, gogo is the only
// place where it may not already be nil, so gogo uses an
// explicit write barrier. Everywhere else that resets the
// gobuf asserts that ctxt is already nil.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}
// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this.
	g          *g
	selectdone *uint32 // CAS to 1 to win select race (may point to stack)
	next       *sudog
	prev       *sudog
	elem       unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// waitlink is only accessed by g.
	acquiretime int64
	releasetime int64
	ticket      uint32
	waitlink    *sudog // g.waiting list
	c           *hchan // channel
}
type gcstats struct {
// the struct must consist of only uint64's,
// because it is cast to uint64[].
nhandoff uint64
nhandoffcnt uint64
nprocyield uint64
nosyield uint64
nsleep uint64
}
type libcall struct {
fn uintptr
n uintptr // number of parameters
args uintptr // parameters
r1 uintptr // return values
r2 uintptr
err uintptr // error number
}
// describes how to handle callback
type wincallbackcontext struct {
	gobody unsafe.Pointer // go function to call
argsize uintptr // callback arguments size (in bytes)
restorestack uintptr // adjust stack on return by (in bytes) (386 only)
cleanstack bool
}
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
lo uintptr
hi uintptr
}
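// A minimal sketch using the [lo, hi) bounds described above; both helpers
// are hypothetical and for illustration only.
func hypotheticalStackSize(s stack) uintptr { return s.hi - s.lo }

func hypotheticalStackContains(s stack, addr uintptr) bool {
	return s.lo <= addr && addr < s.hi
}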
// stkbar records the state of a G's stack barrier.
type stkbar struct {
savedLRPtr uintptr // location overwritten by stack barrier PC
savedLRVal uintptr // value overwritten at savedLRPtr
}
type g struct {
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink
	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	stackAlloc     uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar         []stkbar       // stack barriers, from low to high (see top of mstkbar.go)
	stkbarPos      uintptr        // index of lowest stack barrier not hit
	stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool // panic (instead of crash) on unexpected fault address
	preemptscan    bool // preempted g does scan for gc
	gcscandone     bool // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan
	throwsplit     bool // must not split stack
	raceignore     int8 // ignore race detection events
	sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	traceseq       uint64 // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog    // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr // cgo traceback context
// Per-G GC state
// gcRescan is this G's index in work.rescan.list. If this is
// -1, this G is not on the rescan list.
//
// If gcphase != _GCoff and this G is visible to the garbage
// collector, writes to this are protected by work.rescan.lock.
gcRescan int32
// gcAssistBytes is this G's GC assist credit in terms of
// bytes allocated. If this is positive, then the G has credit
// to allocate gcAssistBytes bytes without assisting. If this
// is negative, then the G must correct this by performing
// scan work. We track this in bytes to make it fast to update
// and check for debt in the malloc hot path. The assist ratio
// determines how this corresponds to scan work debt.
gcAssistBytes int64
}
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink
// Fields not known to debuggers.
procid uint64 // for debuggers, but offset not hard-coded
	gsignal *g // signal-handling g
sigmask sigset // storage for saved signal mask
	tls [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn  func()
	curg      *g       // current running goroutine
	caughtsig guintptr // goroutine running during fatal signal
	p         puintptr // attached p for executing go code (nil if not executing go code)
	nextp     puintptr
	id          int32
	mallocing   int32
	throwing    int32
	preemptoff  string // if != "", keep curg running on this m
	locks       int32
	softfloat   int32
	dying       int32
	profilehz   int32
	helpgc      int32
	spinning    bool // m is out of work and is actively looking for work
	blocked     bool // m is blocked on a note
	inwb        bool // m is executing a write barrier
	newSigstack bool // minit on C thread called sigaltstack
	printlock   int8
	incgo       bool // m is executing a cgo call
	fastrand    uint32
ncgocall uint64 // number of cgo calls in total
ncgo int32 // number of cgo calls currently in progress
cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers  *cgoCallers // cgo traceback if crashing in cgo call
	park        note
	alllink     *m // on allm
	schedlink   muintptr
	mcache      *mcache
	lockedg     *g
	createstack [32]uintptr // stack that created this thread.
	freglo      [16]uint32  // d[i] lsb and f[i]
	freghi      [16]uint32  // d[i] msb and f[i+16]
fflag uint32 // floating point compare flags
locked uint32 // tracking for lockosthread
nextwaitm uintptr // next m waiting for lock
gcstats gcstats
needextram bool
traceback uint8
	waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock    unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	thread        uintptr // thread handle
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
libcallpc uintptr // for cpu profiler
libcallsp uintptr
	libcallg guintptr
	syscall  libcall // stores syscall parameters on windows

	mOS
}
type p struct {
lock mutex
id int32
status uint32 // one of pidle/prunning/...
link puintptr
schedtick uint32 // incremented on every scheduler call
syscalltick uint32 // incremented on every system call
m muintptr // back-link to associated m (nil if idle)
	mcache  *mcache
	racectx uintptr

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
goidcacheend uint64
// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
// runnext, if non-nil, is a runnable G that was ready'd by
// the current G and should be run next instead of what's in
// runq if there's time remaining in the running G's time
// slice. It will inherit the time left in the current time
// slice. If a set of goroutines is locked in a
// communicate-and-wait pattern, this schedules that set as a
// unit and eliminates the (potentially large) scheduling
// latency that otherwise arises from adding the ready'd
// goroutines to the end of the run queue.
runnext guintptr
	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   guintptr
	gcMarkWorkerMode gcMarkWorkerMode
// gcw is this P's GC work buffer cache. The work buffer is
// filled by write barriers, drained by mutator assists, and
// disposed on certain GC state transitions.
gcw gcWork
	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [sys.CacheLineSize]byte
}
const (
// The max value of GOMAXPROCS.
// There are no fundamental restrictions on the value.
_MaxGomaxprocs = 1 << 8
)
type schedt struct {
// accessed atomically. keep at top to ensure alignment on 32-bit systems.
goidgen uint64
lastpoll uint64
lock mutex
midle muintptr // idle m's waiting for work
nmidle int32 // number of idle m's waiting for work
nmidlelocked int32 // number of locked m's waiting for work
mcount int32 // number of m's that have been created
maxmcount int32 // maximum number of m's allowed (or die)
ngsys uint32 // number of system goroutines; updated atomically
2015-04-17 00:21:30 -04:00
pidle puintptr // idle p's
2014-11-11 17:05:19 -05:00
npidle uint32
2015-12-08 15:11:27 +01:00
nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
2014-11-11 17:05:19 -05:00
// Global runnable queue.
2015-04-17 00:21:30 -04:00
runqhead guintptr
runqtail guintptr
runqsize int32
// Global cache of dead G's.
	gflock       mutex
	gfreeStack   *g
	gfreeNoStack *g
	ngfree       int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note
sysmonwait uint32
sysmonnote note
// safepointFn should be called on each P at the next GC
// safepoint if p.runSafePointFn is set.
	safePointFn func(*p)
safePointWait int32
safePointNote note
profilehz int32 // cpu profiling rate
procresizetime int64 // nanotime() of last change to gomaxprocs
totaltime int64 // ∫gomaxprocs dt up to procresizetime
}
// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m.locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
_LockExternal = 1
_LockInternal = 2
)
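// A minimal sketch of decoding the two pieces of state packed into m.locked
// as described above; both helpers are hypothetical and for illustration only.
func hypotheticalLockedExternally(locked uint32) bool {
	return locked&_LockExternal != 0
}

func hypotheticalLockedDepth(locked uint32) uint32 {
	return (locked &^ _LockExternal) / _LockInternal
}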
const (
_SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel
_SigKill // if signal.Notify doesn't take it, exit quietly
_SigThrow // if signal.Notify doesn't take it, exit loudly
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
_SigHandling // our signal handler is registered
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
_SigSetStack // add SA_ONSTACK to libc handler
_SigUnblock // unblocked in minit
)
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name
	args    int32   // in/out args size
	_       int32   // previously legacy frame size; kept for layout compatibility
pcsp int32
pcfile int32
pcln int32
npcdata int32
nfuncdata int32
}
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptypestructs.
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	inhash int32      // has this itab been added to hash?
	fun    [1]uintptr // variable sized
}
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
idle uint32
}
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
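// A minimal usage sketch: seed the front of a buffer (here from
// startupRandomData, which may be empty) and let extendRandom fill the rest.
// fillHypotheticalSeed is an illustrative name, not a runtime function.
func fillHypotheticalSeed(buf []byte) {
	n := copy(buf, startupRandomData)
	extendRandom(buf, n)
}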
// deferred subroutine calls
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}
// panics
type _panic struct {
	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg  interface{}    // argument to panic
	link *_panic        // link to earlier panic
recovered bool // whether this panic is over
aborted bool // the panic was aborted
}
// stack traces
type stkframe struct {
	fn *_func // function being run
pc uintptr // program counter within fn
continpc uintptr // program counter where execution can continue, or 0 if not
lr uintptr // program counter at caller aka link register
sp uintptr // stack pointer at pc
fp uintptr // stack pointer at caller aka frame pointer
varp uintptr // top of local variables
argp uintptr // pointer to function arguments
arglen uintptr // number of bytes at argp
	argmap *bitvector // force use of this argmap
}
const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
var (
emptystring string
allglen uintptr
	allm       *m
	allp       [_MaxGomaxprocs + 1]*p
gomaxprocs int32
panicking uint32
ncpu int32
forcegc forcegcstate
sched schedt
newprocs int32
	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	cpuid_ebx7        uint32
	lfenceBeforeRdtsc bool

	support_avx  bool
	support_avx2 bool
	support_bmi1 bool
	support_bmi2 bool

	goarm                uint8 // set by cmd/link on arm systems
	framepointer_enabled bool  // set by cmd/link
)
// Set by the linker so the runtime can determine the buildmode.
var (
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)