Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
runtime: replace func-based write barrier skipping with type-based
This CL revises CL 7504 to use explicitly uintptr types for the struct fields
that are going to be updated sometimes without write barriers. The result is
that the fields are now updated *always* without write barriers.

This approach has two important properties:

1) Now the GC never looks at the field, so if the missing reference could cause
   a problem, it will do so all the time, not just when the write barrier is
   missed at just the right moment.

2) Now a write barrier never happens for the field, avoiding the (correct)
   detection of inconsistent write barriers when GODEBUG=wbshadow=1.

Change-Id: Iebd3962c727c0046495cc08914a8dc0808460e0e
Reviewed-on: https://go-review.googlesource.com/9019
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent c776592a4f, commit 181e26b9fa
21 changed files with 263 additions and 278 deletions
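Before the diff itself, here is a minimal sketch of the pattern this CL introduces: a link field that used to be a real pointer becomes a named uintptr type with explicit ptr/set accessors, so the GC never scans the field and no write barrier ever fires for it. This is an illustration only, assuming a toy `node`/`nodeuintptr` pair as stand-ins for the runtime's g/m/p and guintptr/muintptr/puintptr (defined in the runtime2.go hunk at the bottom of the diff); the objects must stay reachable through ordinary pointers elsewhere, as gs, ms, and ps are via allgs, allm, and allp.

```go
package main

import (
	"fmt"
	"unsafe"
)

// node stands in for a runtime structure such as g, m, or p (illustrative only).
type node struct {
	id   int
	link nodeuintptr // uintptr-typed: the GC ignores it, and writes to it never trigger a write barrier
}

// nodeuintptr mirrors the guintptr/puintptr pattern from this CL:
// a pointer stored as a uintptr, with explicit accessors instead of raw pointer assignment.
type nodeuintptr uintptr

func (np nodeuintptr) ptr() *node   { return (*node)(unsafe.Pointer(np)) }
func (np *nodeuintptr) set(n *node) { *np = nodeuintptr(unsafe.Pointer(n)) }

func main() {
	// The nodes must remain reachable via ordinary pointers somewhere else,
	// exactly as the runtime keeps gs, ms, and ps alive through allgs, allm, and allp.
	all := []*node{{id: 1}, {id: 2}}

	all[0].link.set(all[1])           // replaces the old setXNoWriteBarrier(&field, val) helpers
	fmt.Println(all[0].link.ptr().id) // prints 2
}
```

Because every store to such a field skips the barrier, any bug caused by the GC not seeing the reference shows up consistently rather than only when a barrier is missed at the wrong moment, which is exactly the property the commit message describes.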
@@ -817,8 +817,8 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 mp := acquirem()
 var persistent *persistentAlloc
-if mp != nil && mp.p != nil {
-persistent = &mp.p.palloc
+if mp != nil && mp.p != 0 {
+persistent = &mp.p.ptr().palloc
 } else {
 lock(&globalAlloc.mutex)
 persistent = &globalAlloc.persistentAlloc
@@ -92,10 +92,6 @@ func needwb() bool {
 // the p associated with an m. We use the fact that m.p == nil to indicate
 // that we are in one these critical section and throw if the write is of
 // a pointer to a heap object.
-// The p, m, and g pointers are the pointers that are used by the scheduler
-// and need to be operated on without write barriers. We use
-// the setPNoWriteBarrier, setMNoWriteBarrier and setGNowriteBarrier to
-// avoid having to do the write barrier.
 //go:nosplit
 func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
 mp := acquirem()
@@ -104,7 +100,7 @@ func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
 return
 }
 systemstack(func() {
-if mp.p == nil && memstats.enablegc && !mp.inwb && inheap(src) {
+if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
 throw("writebarrierptr_nostore1 called with mp.p == nil")
 }
 mp.inwb = true
@@ -275,24 +275,22 @@ func net_runtime_pollUnblock(pd *pollDesc) {
 // make pd ready, newly runnable goroutines (if any) are returned in rg/wg
 // May run during STW, so write barriers are not allowed.
-// Eliminating WB calls using setGNoWriteBarrier are safe since the gs are
-// reachable through allg.
 //go:nowritebarrier
-func netpollready(gpp **g, pd *pollDesc, mode int32) {
-var rg, wg *g
+func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
+var rg, wg guintptr
 if mode == 'r' || mode == 'r'+'w' {
-setGNoWriteBarrier(&rg, netpollunblock(pd, 'r', true))
+rg.set(netpollunblock(pd, 'r', true))
 }
 if mode == 'w' || mode == 'r'+'w' {
-setGNoWriteBarrier(&wg, netpollunblock(pd, 'w', true))
+wg.set(netpollunblock(pd, 'w', true))
 }
-if rg != nil {
-setGNoWriteBarrier(&rg.schedlink, *gpp)
-setGNoWriteBarrier(gpp, rg)
+if rg != 0 {
+rg.ptr().schedlink = *gpp
+*gpp = rg
 }
-if wg != nil {
-setGNoWriteBarrier(&wg.schedlink, *gpp)
-setGNoWriteBarrier(gpp, wg)
+if wg != 0 {
+wg.ptr().schedlink = *gpp
+*gpp = wg
 }
 }
@@ -55,9 +55,9 @@ func netpollarm(pd *pollDesc, mode int) {
 // polls for ready network connections
 // returns list of goroutines that become runnable
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 if epfd == -1 {
-return
+return nil
 }
 waitms := int32(-1)
 if !block {
@@ -73,6 +73,7 @@ retry:
 }
 goto retry
 }
+var gp guintptr
 for i := int32(0); i < n; i++ {
 ev := &events[i]
 if ev.events == 0 {
@@ -87,11 +88,12 @@ retry:
 }
 if mode != 0 {
 pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
-netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
+netpollready(&gp, pd, mode)
 }
 }
-if block && gp == nil {
+if block && gp == 0 {
 goto retry
 }
-return gp
+return gp.ptr()
 }
@@ -62,9 +62,9 @@ func netpollarm(pd *pollDesc, mode int) {
 // Polls for ready network connections.
 // Returns list of goroutines that become runnable.
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 if kq == -1 {
-return
+return nil
 }
 var tp *timespec
 var ts timespec
@@ -81,6 +81,7 @@ retry:
 }
 goto retry
 }
+var gp guintptr
 for i := 0; i < int(n); i++ {
 ev := &events[i]
 var mode int32
@@ -91,11 +92,11 @@ retry:
 mode += 'w'
 }
 if mode != 0 {
-netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
+netpollready(&gp, (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
 }
 }
-if block && gp == nil {
+if block && gp == 0 {
 goto retry
 }
-return gp
+return gp.ptr()
 }
@@ -179,9 +179,9 @@ var netpolllasterr int32
 // polls for ready network connections
 // returns list of goroutines that become runnable
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 if portfd == -1 {
-return
+return nil
 }
 var wait *timespec
@@ -201,7 +201,7 @@ retry:
 goto retry
 }
-gp = nil
+var gp guintptr
 for i := 0; i < int(n); i++ {
 ev := &events[i]
@@ -232,12 +232,12 @@ retry:
 }
 if mode != 0 {
-netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
+netpollready(&gp, pd, mode)
 }
 }
-if block && gp == nil {
+if block && gp == 0 {
 goto retry
 }
-return gp
+return gp.ptr()
 }
@@ -63,14 +63,13 @@ func netpoll(block bool) *g {
 var wait, qty, key, flags, n, i uint32
 var errno int32
 var op *net_op
-var gp *g
+var gp guintptr
 mp := getg().m
 if iocphandle == _INVALID_HANDLE_VALUE {
 return nil
 }
-gp = nil
 wait = 0
 if block {
 wait = _INFINITE
@@ -125,13 +124,13 @@ retry:
 mp.blocked = false
 handlecompletion(&gp, op, errno, qty)
 }
-if block && gp == nil {
+if block && gp == 0 {
 goto retry
 }
-return gp
+return gp.ptr()
 }
-func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
+func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) {
 if op == nil {
 throw("netpoll: GetQueuedCompletionStatus returned op == nil")
 }
@@ -142,5 +141,5 @@ func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
 }
 op.errno = errno
 op.qty = qty
-netpollready((**g)(noescape(unsafe.Pointer(gpp))), op.pd, mode)
+netpollready(gpp, op.pd, mode)
 }
@@ -363,8 +363,7 @@ func stdcall(fn stdFunction) uintptr {
 if mp.profilehz != 0 {
 // leave pc/sp for cpu profiler
-// gp is on allg, so this WB can be eliminated.
-setGNoWriteBarrier(&mp.libcallg, gp)
+mp.libcallg.set(gp)
 mp.libcallpc = getcallerpc(unsafe.Pointer(&fn))
 // sp must be the last, because once async cpu profiler finds
 // all three values to be non-zero, it will use them
@@ -81,7 +81,7 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
 }
 Throw:
 _g_.m.throwing = 1
-setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+_g_.m.caughtsig.set(gp)
 startpanic()
 print(notestr, "\n")
 print("PC=", hex(c.pc()), "\n")
@@ -165,7 +165,7 @@ func newdefer(siz int32) *_defer {
 sc := deferclass(uintptr(siz))
 mp := acquirem()
 if sc < uintptr(len(p{}.deferpool)) {
-pp := mp.p
+pp := mp.p.ptr()
 if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
 lock(&sched.deferlock)
 for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
@@ -223,7 +223,7 @@ func freedefer(d *_defer) {
 sc := deferclass(uintptr(d.siz))
 if sc < uintptr(len(p{}.deferpool)) {
 mp := acquirem()
-pp := mp.p
+pp := mp.p.ptr()
 if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
 // Transfer half of local cache to the central cache.
 var first, last *_defer
@@ -208,7 +208,7 @@ func acquireSudog() *sudog {
 // The acquirem/releasem increments m.locks during new(sudog),
 // which keeps the garbage collector from being invoked.
 mp := acquirem()
-pp := mp.p
+pp := mp.p.ptr()
 if len(pp.sudogcache) == 0 {
 lock(&sched.sudoglock)
 // First, try to grab a batch from central cache.
@@ -257,7 +257,7 @@ func releaseSudog(s *sudog) {
 throw("runtime: releaseSudog with non-nil gp.param")
 }
 mp := acquirem() // avoid rescheduling to another P
-pp := mp.p
+pp := mp.p.ptr()
 if len(pp.sudogcache) == cap(pp.sudogcache) {
 // Transfer half of local cache to the central cache.
 var first, last *sudog
@@ -145,7 +145,7 @@ func ready(gp *g, traceskip int) {
 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
 casgstatus(gp, _Gwaiting, _Grunnable)
-runqput(_g_.m.p, gp)
+runqput(_g_.m.p.ptr(), gp)
 if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
 wakep()
 }
@@ -185,7 +185,7 @@ func readyExecute(gp *g, traceskip int) {
 // Preempt the current g
 casgstatus(_g_, _Grunning, _Grunnable)
-runqput(_g_.m.p, _g_)
+runqput(_g_.m.p.ptr(), _g_)
 dropg()
 // Ready gp and switch to it
@@ -239,7 +239,7 @@ func helpgc(nproc int32) {
 throw("gcprocs inconsistency")
 }
 mp.helpgc = n
-mp.p = allp[pos]
+mp.p.set(allp[pos])
 mp.mcache = allp[pos].mcache
 pos++
 notewakeup(&mp.park)
@@ -603,7 +603,7 @@ func stoptheworld() {
 atomicstore(&sched.gcwaiting, 1)
 preemptall()
 // stop current P
-_g_.m.p.status = _Pgcstop // Pgcstop is only diagnostic.
+_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
 sched.stopwait--
 // try to retake all P's in Psyscall status
 for i := 0; i < int(gomaxprocs); i++ {
@@ -681,14 +681,14 @@ func starttheworld() {
 for p1 != nil {
 p := p1
-p1 = p1.link
-if p.m != nil {
-mp := p.m
-p.m = nil
-if mp.nextp != nil {
+p1 = p1.link.ptr()
+if p.m != 0 {
+mp := p.m.ptr()
+p.m = 0
+if mp.nextp != 0 {
 throw("starttheworld: inconsistent mp->nextp")
 }
-mp.nextp = p
+mp.nextp.set(p)
 notewakeup(&mp.park)
 } else {
 // Start M to run P. Do not start another M below.
@@ -768,8 +768,7 @@ func mstart1() {
 initsig()
 }
-if _g_.m.mstartfn != 0 {
-fn := *(*func())(unsafe.Pointer(&_g_.m.mstartfn))
+if fn := _g_.m.mstartfn; fn != nil {
 fn()
 }
@@ -777,8 +776,8 @@ func mstart1() {
 _g_.m.helpgc = 0
 stopm()
 } else if _g_.m != &m0 {
-acquirep(_g_.m.nextp)
-_g_.m.nextp = nil
+acquirep(_g_.m.nextp.ptr())
+_g_.m.nextp = 0
 }
 schedule()
 }
@@ -789,20 +788,22 @@ func mstart1() {
 var cgoThreadStart unsafe.Pointer
 type cgothreadstart struct {
-g *g
+g guintptr
 tls *uint64
 fn unsafe.Pointer
 }
 // Allocate a new m unassociated with any thread.
 // Can use p for allocation context if needed.
-func allocm(_p_ *p) *m {
+// fn is recorded as the new m's m.mstartfn.
+func allocm(_p_ *p, fn func()) *m {
 _g_ := getg()
 _g_.m.locks++ // disable GC because it can be called from sysmon
-if _g_.m.p == nil {
+if _g_.m.p == 0 {
 acquirep(_p_) // temporarily borrow p for mallocs in this function
 }
 mp := new(m)
+mp.mstartfn = fn
 mcommoninit(mp)
 // In case of cgo or Solaris, pthread_create will make us a stack.
@@ -814,7 +815,7 @@ func allocm(_p_ *p) *m {
 }
 mp.g0.m = mp
-if _p_ == _g_.m.p {
+if _p_ == _g_.m.p.ptr() {
 releasep()
 }
 _g_.m.locks--
@@ -880,8 +881,8 @@ func needm(x byte) {
 // after exitsyscall makes sure it is okay to be
 // running at all (that is, there's no garbage collection
 // running right now).
-mp.needextram = mp.schedlink == nil
-unlockextra(mp.schedlink)
+mp.needextram = mp.schedlink == 0
+unlockextra(mp.schedlink.ptr())
 // Install g (= m->g0) and set the stack bounds
 // to match the current stack. We don't actually know
@@ -910,7 +911,7 @@ func newextram() {
 // The sched.pc will never be returned to, but setting it to
 // goexit makes clear to the traceback routines where
 // the goroutine stack ends.
-mp := allocm(nil)
+mp := allocm(nil, nil)
 gp := malg(4096)
 gp.sched.pc = funcPC(goexit) + _PCQuantum
 gp.sched.sp = gp.stack.hi
@@ -936,7 +937,7 @@ func newextram() {
 // Add m to the extra list.
 mnext := lockextra(true)
-mp.schedlink = mnext
+mp.schedlink.set(mnext)
 unlockextra(mp)
 }
@@ -972,7 +973,7 @@ func dropm() {
 // with no pointer manipulation.
 mp := getg().m
 mnext := lockextra(true)
-mp.schedlink = mnext
+mp.schedlink.set(mnext)
 setg(nil)
 unlockextra(mp)
@@ -1019,18 +1020,14 @@ func unlockextra(mp *m) {
 // May run with m.p==nil, so write barriers are not allowed.
 //go:nowritebarrier
 func newm(fn func(), _p_ *p) {
-mp := allocm(_p_)
-// procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
-setPNoWriteBarrier(&mp.nextp, _p_)
-// Store &fn as a uintptr since it is not heap allocated so the WB can be eliminated
-mp.mstartfn = *(*uintptr)(unsafe.Pointer(&fn))
+mp := allocm(_p_, fn)
+mp.nextp.set(_p_)
 if iscgo {
 var ts cgothreadstart
 if _cgo_thread_start == nil {
 throw("_cgo_thread_start missing")
 }
-// mp is reachable via allm and mp.g0 never changes, so WB can be eliminated.
-setGNoWriteBarrier(&ts.g, mp.g0)
+ts.g.set(mp.g0)
 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
 ts.fn = unsafe.Pointer(funcPC(mstart))
 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
@@ -1047,7 +1044,7 @@ func stopm() {
 if _g_.m.locks != 0 {
 throw("stopm holding locks")
 }
-if _g_.m.p != nil {
+if _g_.m.p != 0 {
 throw("stopm holding p")
 }
 if _g_.m.spinning {
@@ -1065,11 +1062,11 @@ retry:
 gchelper()
 _g_.m.helpgc = 0
 _g_.m.mcache = nil
-_g_.m.p = nil
+_g_.m.p = 0
 goto retry
 }
-acquirep(_g_.m.nextp)
-_g_.m.nextp = nil
+acquirep(_g_.m.nextp.ptr())
+_g_.m.nextp = 0
 }
 func mspinning() {
@@ -1105,12 +1102,11 @@ func startm(_p_ *p, spinning bool) {
 if mp.spinning {
 throw("startm: m is spinning")
 }
-if mp.nextp != nil {
+if mp.nextp != 0 {
 throw("startm: m has p")
 }
 mp.spinning = spinning
-// procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
-setPNoWriteBarrier(&mp.nextp, _p_)
+mp.nextp.set(_p_)
 notewakeup(&mp.park)
 }
@@ -1173,7 +1169,7 @@ func stoplockedm() {
 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
 throw("stoplockedm: inconsistent locking")
 }
-if _g_.m.p != nil {
+if _g_.m.p != 0 {
 // Schedule another M to run this p.
 _p_ := releasep()
 handoffp(_p_)
@@ -1188,8 +1184,8 @@ func stoplockedm() {
 dumpgstatus(_g_)
 throw("stoplockedm: not runnable")
 }
-acquirep(_g_.m.nextp)
-_g_.m.nextp = nil
+acquirep(_g_.m.nextp.ptr())
+_g_.m.nextp = 0
 }
 // Schedules the locked m to run the locked gp.
@@ -1202,14 +1198,13 @@ func startlockedm(gp *g) {
 if mp == _g_.m {
 throw("startlockedm: locked to me")
 }
-if mp.nextp != nil {
+if mp.nextp != 0 {
 throw("startlockedm: m has p")
 }
 // directly handoff current P to the locked m
 incidlelocked(-1)
 _p_ := releasep()
-// procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
-setPNoWriteBarrier(&mp.nextp, _p_)
+mp.nextp.set(_p_)
 notewakeup(&mp.park)
 stopm()
 }
@@ -1246,7 +1241,7 @@ func execute(gp *g) {
 gp.waitsince = 0
 gp.preempt = false
 gp.stackguard0 = gp.stack.lo + _StackGuard
-_g_.m.p.schedtick++
+_g_.m.p.ptr().schedtick++
 _g_.m.curg = gp
 gp.m = _g_.m
@@ -1280,14 +1275,14 @@ top:
 }
 // local runq
-if gp := runqget(_g_.m.p); gp != nil {
+if gp := runqget(_g_.m.p.ptr()); gp != nil {
 return gp
 }
 // global runq
 if sched.runqsize != 0 {
 lock(&sched.lock)
-gp := globrunqget(_g_.m.p, 0)
+gp := globrunqget(_g_.m.p.ptr(), 0)
 unlock(&sched.lock)
 if gp != nil {
 return gp
@@ -1303,7 +1298,7 @@ top:
 if netpollinited() && sched.lastpoll != 0 {
 if gp := netpoll(false); gp != nil { // non-blocking
 // netpoll returns list of goroutines linked by schedlink.
-injectglist(gp.schedlink)
+injectglist(gp.schedlink.ptr())
 casgstatus(gp, _Gwaiting, _Grunnable)
 if trace.enabled {
 traceGoUnpark(gp, 0)
@@ -1329,10 +1324,10 @@ top:
 }
 _p_ := allp[fastrand1()%uint32(gomaxprocs)]
 var gp *g
-if _p_ == _g_.m.p {
+if _p_ == _g_.m.p.ptr() {
 gp = runqget(_p_)
 } else {
-gp = runqsteal(_g_.m.p, _p_)
+gp = runqsteal(_g_.m.p.ptr(), _p_)
 }
 if gp != nil {
 return gp
@@ -1347,7 +1342,7 @@ stop:
 goto top
 }
 if sched.runqsize != 0 {
-gp := globrunqget(_g_.m.p, 0)
+gp := globrunqget(_g_.m.p.ptr(), 0)
 unlock(&sched.lock)
 return gp
 }
@@ -1376,7 +1371,7 @@ stop:
 // poll network
 if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
-if _g_.m.p != nil {
+if _g_.m.p != 0 {
 throw("findrunnable: netpoll with p")
 }
 if _g_.m.spinning {
@@ -1390,7 +1385,7 @@ stop:
 unlock(&sched.lock)
 if _p_ != nil {
 acquirep(_p_)
-injectglist(gp.schedlink)
+injectglist(gp.schedlink.ptr())
 casgstatus(gp, _Gwaiting, _Grunnable)
 if trace.enabled {
 traceGoUnpark(gp, 0)
@@ -1432,7 +1427,7 @@ func injectglist(glist *g) {
 return
 }
 if trace.enabled {
-for gp := glist; gp != nil; gp = gp.schedlink {
+for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
 traceGoUnpark(gp, 0)
 }
 }
@@ -1440,7 +1435,7 @@ func injectglist(glist *g) {
 var n int
 for n = 0; glist != nil; n++ {
 gp := glist
-glist = gp.schedlink
+glist = gp.schedlink.ptr()
 casgstatus(gp, _Gwaiting, _Grunnable)
 globrunqput(gp)
 }
@@ -1483,9 +1478,9 @@ top:
 // Check the global runnable queue once in a while to ensure fairness.
 // Otherwise two goroutines can completely occupy the local runqueue
 // by constantly respawning each other.
-if _g_.m.p.schedtick%61 == 0 && sched.runqsize > 0 {
+if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
 lock(&sched.lock)
-gp = globrunqget(_g_.m.p, 1)
+gp = globrunqget(_g_.m.p.ptr(), 1)
 unlock(&sched.lock)
 if gp != nil {
 resetspinning()
@@ -1493,7 +1488,7 @@ top:
 }
 }
 if gp == nil {
-gp = runqget(_g_.m.p)
+gp = runqget(_g_.m.p.ptr())
 if gp != nil && _g_.m.spinning {
 throw("schedule: spinning with local work")
 }
@@ -1624,7 +1619,7 @@ func goexit0(gp *g) {
 throw("internal lockOSThread error")
 }
 _g_.m.locked = 0
-gfput(_g_.m.p, gp)
+gfput(_g_.m.p.ptr(), gp)
 schedule()
 }
@@ -1671,7 +1666,7 @@ func save(pc, sp uintptr) {
 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
-// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.syscalltick),
+// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
 // and we wait for the increment before emitting traceGoSysExit.
 // Note that the increment is done even if tracing is not enabled,
@@ -1713,10 +1708,10 @@ func reentersyscall(pc, sp uintptr) {
 save(pc, sp)
 }
-_g_.m.syscalltick = _g_.m.p.syscalltick
+_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
 _g_.m.mcache = nil
-_g_.m.p.m = nil
-atomicstore(&_g_.m.p.status, _Psyscall)
+_g_.m.p.ptr().m = 0
+atomicstore(&_g_.m.p.ptr().status, _Psyscall)
 if sched.gcwaiting != 0 {
 systemstack(entersyscall_gcwait)
 save(pc, sp)
@@ -1746,7 +1741,7 @@ func entersyscall_sysmon() {
 func entersyscall_gcwait() {
 _g_ := getg()
-_p_ := _g_.m.p
+_p_ := _g_.m.p.ptr()
 lock(&sched.lock)
 if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) {
@@ -1770,8 +1765,8 @@ func entersyscallblock(dummy int32) {
 _g_.m.locks++ // see comment in entersyscall
 _g_.throwsplit = true
 _g_.stackguard0 = stackPreempt // see comment in entersyscall
-_g_.m.syscalltick = _g_.m.p.syscalltick
-_g_.m.p.syscalltick++
+_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
+_g_.m.p.ptr().syscalltick++
 // Leave SP around for GC and traceback.
 pc := getcallerpc(unsafe.Pointer(&dummy))
@@ -1807,7 +1802,7 @@ func entersyscallblock(dummy int32) {
 func entersyscallblock_handoff() {
 if trace.enabled {
 traceGoSysCall()
-traceGoSysBlock(getg().m.p)
+traceGoSysBlock(getg().m.p.ptr())
 }
 handoffp(releasep())
 }
@@ -1826,18 +1821,18 @@ func exitsyscall(dummy int32) {
 }
 _g_.waitsince = 0
-oldp := _g_.m.p
+oldp := _g_.m.p.ptr()
 if exitsyscallfast() {
 if _g_.m.mcache == nil {
 throw("lost mcache")
 }
 if trace.enabled {
-if oldp != _g_.m.p || _g_.m.syscalltick != _g_.m.p.syscalltick {
+if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
 systemstack(traceGoStart)
 }
 }
 // There's a cpu for us, so we can run.
-_g_.m.p.syscalltick++
+_g_.m.p.ptr().syscalltick++
 // We need to cas the status and scan before resuming...
 casgstatus(_g_, _Gsyscall, _Grunning)
@@ -1891,7 +1886,7 @@ func exitsyscall(dummy int32) {
 // we don't know for sure that the garbage collector
 // is not running.
 _g_.syscallsp = 0
-_g_.m.p.syscalltick++
+_g_.m.p.ptr().syscalltick++
 _g_.throwsplit = false
 if exitTicks != 0 {
@@ -1909,37 +1904,37 @@ func exitsyscallfast() bool {
 // Freezetheworld sets stopwait but does not retake P's.
 if sched.stopwait == freezeStopWait {
 _g_.m.mcache = nil
-_g_.m.p = nil
+_g_.m.p = 0
 return false
 }
 // Try to re-acquire the last P.
-if _g_.m.p != nil && _g_.m.p.status == _Psyscall && cas(&_g_.m.p.status, _Psyscall, _Prunning) {
+if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
 // There's a cpu for us, so we can run.
-_g_.m.mcache = _g_.m.p.mcache
-_g_.m.p.m = _g_.m
-if _g_.m.syscalltick != _g_.m.p.syscalltick {
+_g_.m.mcache = _g_.m.p.ptr().mcache
+_g_.m.p.ptr().m.set(_g_.m)
+if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
 if trace.enabled {
 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
 // traceGoSysBlock for this syscall was already emitted,
 // but here we effectively retake the p from the new syscall running on the same p.
 systemstack(func() {
 // Denote blocking of the new syscall.
-traceGoSysBlock(_g_.m.p)
+traceGoSysBlock(_g_.m.p.ptr())
 // Denote completion of the current syscall.
 traceGoSysExit(0)
 })
 }
-_g_.m.p.syscalltick++
+_g_.m.p.ptr().syscalltick++
 }
 return true
 }
 // Try to get any other idle P.
-oldp := _g_.m.p
+oldp := _g_.m.p.ptr()
 _g_.m.mcache = nil
-_g_.m.p = nil
-if sched.pidle != nil {
+_g_.m.p = 0
+if sched.pidle != 0 {
 var ok bool
 systemstack(func() {
 ok = exitsyscallfast_pidle()
@@ -2101,7 +2096,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 throw("newproc: function arguments too large for new goroutine")
 }
-_p_ := _g_.m.p
+_p_ := _g_.m.p.ptr()
 newg := gfget(_p_)
 if newg == nil {
 newg = malg(_StackMin)
@@ -2184,7 +2179,7 @@ func gfput(_p_ *p, gp *g) {
 gp.stackguard0 = 0
 }
-gp.schedlink = _p_.gfree
+gp.schedlink.set(_p_.gfree)
 _p_.gfree = gp
 _p_.gfreecnt++
 if _p_.gfreecnt >= 64 {
@@ -2192,8 +2187,8 @@ func gfput(_p_ *p, gp *g) {
 for _p_.gfreecnt >= 32 {
 _p_.gfreecnt--
 gp = _p_.gfree
-_p_.gfree = gp.schedlink
-gp.schedlink = sched.gfree
+_p_.gfree = gp.schedlink.ptr()
+gp.schedlink.set(sched.gfree)
 sched.gfree = gp
 sched.ngfree++
 }
@@ -2211,16 +2206,16 @@ retry:
 for _p_.gfreecnt < 32 && sched.gfree != nil {
 _p_.gfreecnt++
 gp = sched.gfree
-sched.gfree = gp.schedlink
+sched.gfree = gp.schedlink.ptr()
 sched.ngfree--
-gp.schedlink = _p_.gfree
+gp.schedlink.set(_p_.gfree)
 _p_.gfree = gp
 }
 unlock(&sched.gflock)
 goto retry
 }
 if gp != nil {
-_p_.gfree = gp.schedlink
+_p_.gfree = gp.schedlink.ptr()
 _p_.gfreecnt--
 if gp.stack.lo == 0 {
 // Stack was deallocated in gfput. Allocate a new one.
@@ -2243,8 +2238,8 @@ func gfpurge(_p_ *p) {
 for _p_.gfreecnt != 0 {
 _p_.gfreecnt--
 gp := _p_.gfree
-_p_.gfree = gp.schedlink
-gp.schedlink = sched.gfree
+_p_.gfree = gp.schedlink.ptr()
+gp.schedlink.set(sched.gfree)
 sched.gfree = gp
 sched.ngfree++
 }
@@ -2453,10 +2448,10 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 // This is especially important on windows, since all syscalls are cgo calls.
 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
 }
-if GOOS == "windows" && n == 0 && mp.libcallg != nil && mp.libcallpc != 0 && mp.libcallsp != 0 {
+if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
 // Libcall, i.e. runtime syscall on windows.
 // Collect Go stack that leads to the call.
-n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg, 0, &stk[0], len(stk), nil, nil, 0)
+n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
 }
 if n == 0 {
 // If all of the above has failed, account it against abstract "System" or "GC".
@@ -2570,7 +2565,7 @@ func procresize(nprocs int32) *p {
 for i := nprocs; i < old; i++ {
 p := allp[i]
 if trace.enabled {
-if p == getg().m.p {
+if p == getg().m.p.ptr() {
 // moving to p[0], pretend that we were descheduled
 // and then scheduled again to keep the trace sane.
 traceGoSched()
@@ -2584,9 +2579,9 @@ func procresize(nprocs int32) *p {
 gp := p.runq[p.runqtail%uint32(len(p.runq))]
 // push onto head of global queue
 gp.schedlink = sched.runqhead
-sched.runqhead = gp
-if sched.runqtail == nil {
-sched.runqtail = gp
+sched.runqhead.set(gp)
+if sched.runqtail == 0 {
+sched.runqtail.set(gp)
 }
 sched.runqsize++
 }
@@ -2609,18 +2604,18 @@ func procresize(nprocs int32) *p {
 }
 _g_ := getg()
-if _g_.m.p != nil && _g_.m.p.id < nprocs {
+if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
 // continue to use the current P
-_g_.m.p.status = _Prunning
+_g_.m.p.ptr().status = _Prunning
 } else {
 // release the current P and acquire allp[0]
-if _g_.m.p != nil {
-_g_.m.p.m = nil
+if _g_.m.p != 0 {
+_g_.m.p.ptr().m = 0
 }
-_g_.m.p = nil
+_g_.m.p = 0
 _g_.m.mcache = nil
 p := allp[0]
-p.m = nil
+p.m = 0
 p.status = _Pidle
 acquirep(p)
 if trace.enabled {
@@ -2630,15 +2625,15 @@ func procresize(nprocs int32) *p {
 var runnablePs *p
 for i := nprocs - 1; i >= 0; i-- {
 p := allp[i]
-if _g_.m.p == p {
+if _g_.m.p.ptr() == p {
 continue
 }
 p.status = _Pidle
 if p.runqhead == p.runqtail {
 pidleput(p)
 } else {
-p.m = mget()
-p.link = runnablePs
+p.m.set(mget())
+p.link.set(runnablePs)
 runnablePs = p
 }
 }
@@ -2648,53 +2643,57 @@ func procresize(nprocs int32) *p {
 }
 // Associate p and the current m.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrier
 func acquirep(_p_ *p) {
-_g_ := getg()
-if _g_.m.p != nil || _g_.m.mcache != nil {
-throw("acquirep: already in go")
-}
-if _p_.m != nil || _p_.status != _Pidle {
-id := int32(0)
-if _p_.m != nil {
-id = _p_.m.id
-}
-print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
-throw("acquirep: invalid p state")
-}
-// _p_.mcache holds the mcache and _p_ is in allp, so WB can be eliminated
-setMcacheNoWriteBarrier(&_g_.m.mcache, _p_.mcache)
-// _p_ is in allp so WB can be eliminated
-setPNoWriteBarrier(&_g_.m.p, _p_)
-// m is in _g_.m and is reachable through allg, so WB can be eliminated
-setMNoWriteBarrier(&_p_.m, _g_.m)
-_p_.status = _Prunning
+acquirep1(_p_)
+// have p; write barriers now allowed
+_g_ := getg()
+_g_.m.mcache = _p_.mcache
 if trace.enabled {
 traceProcStart()
 }
 }
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
+func acquirep1(_p_ *p) {
+_g_ := getg()
+if _g_.m.p != 0 || _g_.m.mcache != nil {
+throw("acquirep: already in go")
+}
+if _p_.m != 0 || _p_.status != _Pidle {
+id := int32(0)
+if _p_.m != 0 {
+id = _p_.m.ptr().id
+}
+print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
+throw("acquirep: invalid p state")
+}
+_g_.m.p.set(_p_)
+_p_.m.set(_g_.m)
+_p_.status = _Prunning
+}
 // Disassociate p and the current m.
 func releasep() *p {
 _g_ := getg()
-if _g_.m.p == nil || _g_.m.mcache == nil {
+if _g_.m.p == 0 || _g_.m.mcache == nil {
 throw("releasep: invalid arg")
 }
-_p_ := _g_.m.p
-if _p_.m != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
-print("releasep: m=", _g_.m, " m->p=", _g_.m.p, " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
+_p_ := _g_.m.p.ptr()
+if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
+print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
 throw("releasep: invalid p state")
 }
 if trace.enabled {
-traceProcStop(_g_.m.p)
+traceProcStop(_g_.m.p.ptr())
 }
-_g_.m.p = nil
+_g_.m.p = 0
 _g_.m.mcache = nil
-_p_.m = nil
+_p_.m = 0
 _p_.status = _Pidle
 return _p_
 }
@@ -2773,7 +2772,7 @@ func checkdead() {
 if mp == nil {
 newm(nil, _p_)
 } else {
-mp.nextp = _p_
+mp.nextp.set(_p_)
 notewakeup(&mp.park)
 }
 return
@@ -2865,7 +2864,7 @@ func sysmon() {
 if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 {
 lock(&forcegc.lock)
 forcegc.idle = 0
-forcegc.g.schedlink = nil
+forcegc.g.schedlink = 0
 injectglist(forcegc.g)
 unlock(&forcegc.lock)
 }
@@ -2978,7 +2977,7 @@ func preemptall() bool {
 // and will be indicated by the gp->status no longer being
 // Grunning
 func preemptone(_p_ *p) bool {
-mp := _p_.m
+mp := _p_.m.ptr()
 if mp == nil || mp == getg().m {
 return false
 }
@@ -3018,7 +3017,7 @@ func schedtrace(detailed bool) {
 if _p_ == nil {
 continue
 }
-mp := _p_.m
+mp := _p_.m.ptr()
 h := atomicload(&_p_.runqhead)
 t := atomicload(&_p_.runqtail)
 if detailed {
@@ -3047,7 +3046,7 @@ func schedtrace(detailed bool) {
 }
 for mp := allm; mp != nil; mp = mp.alllink {
-_p_ := mp.p
+_p_ := mp.p.ptr()
 gp := mp.curg
 lockedg := mp.lockedg
 id1 := int32(-1)
@@ -3089,10 +3088,8 @@ func schedtrace(detailed bool) {
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
 func mput(mp *m) {
-// sched.midle is reachable via allm, so WB can be eliminated.
-setMNoWriteBarrier(&mp.schedlink, sched.midle)
-// mp is reachable via allm, so WB can be eliminated.
-setMNoWriteBarrier(&sched.midle, mp)
+mp.schedlink = sched.midle
+sched.midle.set(mp)
 sched.nmidle++
 checkdead()
 }
@@ -3102,10 +3099,9 @@ func mput(mp *m) {
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
 func mget() *m {
-mp := sched.midle
+mp := sched.midle.ptr()
 if mp != nil {
-// mp.schedlink is reachable via mp, which is on allm, so WB can be eliminated.
-setMNoWriteBarrier(&sched.midle, mp.schedlink)
+sched.midle = mp.schedlink
 sched.nmidle--
 }
 return mp
@@ -3116,27 +3112,26 @@ func mget() *m {
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
 func globrunqput(gp *g) {
-gp.schedlink = nil
-if sched.runqtail != nil {
-// gp is on allg, so these three WBs can be eliminated.
-setGNoWriteBarrier(&sched.runqtail.schedlink, gp)
+gp.schedlink = 0
+if sched.runqtail != 0 {
+sched.runqtail.ptr().schedlink.set(gp)
 } else {
-setGNoWriteBarrier(&sched.runqhead, gp)
+sched.runqhead.set(gp)
 }
-setGNoWriteBarrier(&sched.runqtail, gp)
+sched.runqtail.set(gp)
 sched.runqsize++
 }
 // Put a batch of runnable goroutines on the global runnable queue.
 // Sched must be locked.
 func globrunqputbatch(ghead *g, gtail *g, n int32) {
-gtail.schedlink = nil
-if sched.runqtail != nil {
-sched.runqtail.schedlink = ghead
+gtail.schedlink = 0
+if sched.runqtail != 0 {
+sched.runqtail.ptr().schedlink.set(ghead)
 } else {
-sched.runqhead = ghead
+sched.runqhead.set(ghead)
 }
-sched.runqtail = gtail
+sched.runqtail.set(gtail)
 sched.runqsize += n
 }
@@ -3160,14 +3155,14 @@ func globrunqget(_p_ *p, max int32) *g {
 sched.runqsize -= n
 if sched.runqsize == 0 {
-sched.runqtail = nil
+sched.runqtail = 0
 }
-gp := sched.runqhead
+gp := sched.runqhead.ptr()
 sched.runqhead = gp.schedlink
 n--
 for ; n > 0; n-- {
-gp1 := sched.runqhead
+gp1 := sched.runqhead.ptr()
 sched.runqhead = gp1.schedlink
 runqput(_p_, gp1)
 }
@@ -3179,9 +3174,8 @@ func globrunqget(_p_ *p, max int32) *g {
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
 func pidleput(_p_ *p) {
-// sched.pidle, _p_.link and _p_ are reachable via allp, so WB can be eliminated.
-setPNoWriteBarrier(&_p_.link, sched.pidle)
-setPNoWriteBarrier(&sched.pidle, _p_)
+_p_.link = sched.pidle
+sched.pidle.set(_p_)
 xadd(&sched.npidle, 1) // TODO: fast atomic
 }
@@ -3190,10 +3184,9 @@ func pidleput(_p_ *p) {
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
 func pidleget() *p {
-_p_ := sched.pidle
+_p_ := sched.pidle.ptr()
 if _p_ != nil {
-// _p_.link is reachable via a _p_ in allp, so WB can be eliminated.
-setPNoWriteBarrier(&sched.pidle, _p_.link)
+sched.pidle = _p_.link
 xadd(&sched.npidle, -1) // TODO: fast atomic
 }
 return _p_
@@ -3239,7 +3232,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
 // Link the goroutines.
 for i := uint32(0); i < n; i++ {
-batch[i].schedlink = batch[i+1]
+batch[i].schedlink.set(batch[i+1])
 }
 // Now put the batch on global queue.
@@ -3413,7 +3406,7 @@ func procPin() int {
 mp := _g_.m
 mp.locks++
-return int(mp.p.id)
+return int(mp.p.ptr().id)
 }
 //go:nosplit
@@ -3458,7 +3451,7 @@ func sync_runtime_canSpin(i int) bool {
 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
 return false
 }
-if p := getg().m.p; p.runqhead != p.runqtail {
+if p := getg().m.p.ptr(); p.runqhead != p.runqtail {
 return false
 }
 return true
@@ -87,8 +87,28 @@ type eface struct {
 data unsafe.Pointer
 }
+// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
+// It is particularly important to avoid write barriers when the current P has
+// been released, because the GC thinks the world is stopped, and an
+// unexpected write barrier would not be synchronized with the GC,
+// which can lead to a half-executed write barrier that has marked the object
+// but not queued it. If the GC skips the object and completes before the
+// queuing can occur, it will incorrectly free the object.
+//
+// We tried using special assignment functions invoked only when not
+// holding a running P, but then some updates to a particular memory
+// word went through write barriers and some did not. This breaks the
+// write barrier shadow checking mode, and it is also scary: better to have
+// a word that is completely ignored by the GC than to have one for which
+// only a few updates are ignored.
+//
+// Gs, Ms, and Ps are always reachable via true pointers in the
+// allgs, allm, and allp lists or (during allocation before they reach those lists)
+// from stack variables.
 // A guintptr holds a goroutine pointer, but typed as a uintptr
-// to bypass write barriers. It is used in the Gobuf goroutine state.
+// to bypass write barriers. It is used in the Gobuf goroutine state
+// and in scheduling lists that are manipulated without a P.
 //
 // The Gobuf.g goroutine pointer is almost always updated by assembly code.
 // In one of the few places it is updated by Go code - func save - it must be
|
||||||
// alternate arena. Using guintptr doesn't make that problem any worse.
|
// alternate arena. Using guintptr doesn't make that problem any worse.
|
||||||
type guintptr uintptr
|
type guintptr uintptr
|
||||||
|
|
||||||
func (gp guintptr) ptr() *g {
|
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
|
||||||
return (*g)(unsafe.Pointer(gp))
|
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
|
||||||
}
|
|
||||||
|
|
||||||
// ps, ms, gs, and mcache are structures that must be manipulated at a level
|
type puintptr uintptr
|
||||||
// lower than that of the normal Go language. For example the routine that
|
|
||||||
// stops the world removes the p from the m structure informing the GC that
|
|
||||||
// this P is stopped and then it moves the g to the global runnable queue.
|
|
||||||
// If write barriers were allowed to happen at this point not only does
|
|
||||||
// the GC think the thread is stopped but the underlying structures
|
|
||||||
// like a p or m are not in a state that is not coherent enough to
|
|
||||||
// support the write barrier actions.
|
|
||||||
// This is particularly painful since a partially executed write barrier
|
|
||||||
// may mark the object but be delinquent in informing the GC that the
|
|
||||||
// object needs to be scanned.
|
|
||||||
|
|
||||||
// setGNoWriteBarriers does *gdst = gval without a write barrier.
|
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
|
||||||
func setGNoWriteBarrier(gdst **g, gval *g) {
|
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
|
||||||
*(*uintptr)(unsafe.Pointer(gdst)) = uintptr(unsafe.Pointer(gval))
|
|
||||||
}
|
|
||||||
|
|
||||||
// setMNoWriteBarriers does *mdst = mval without a write barrier.
|
type muintptr uintptr
|
||||||
func setMNoWriteBarrier(mdst **m, mval *m) {
|
|
||||||
*(*uintptr)(unsafe.Pointer(mdst)) = uintptr(unsafe.Pointer(mval))
|
|
||||||
}
|
|
||||||
|
|
||||||
// setPNoWriteBarriers does *pdst = pval without a write barrier.
|
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
|
||||||
func setPNoWriteBarrier(pdst **p, pval *p) {
|
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
|
||||||
*(*uintptr)(unsafe.Pointer(pdst)) = uintptr(unsafe.Pointer(pval))
|
|
||||||
}
|
|
||||||
|
|
||||||
// setMcacheNoWriteBarriers does *mcachedst = mcacheval without a write barrier.
|
|
||||||
func setMcacheNoWriteBarrier(mcachedst **mcache, mcacheval *mcache) {
|
|
||||||
*(*uintptr)(unsafe.Pointer(mcachedst)) = uintptr(unsafe.Pointer(mcacheval))
|
|
||||||
}
|
|
||||||
|
|
||||||
type gobuf struct {
|
type gobuf struct {
|
||||||
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
|
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
|
||||||
|
|
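
The comment block and one-line accessors above are the heart of this CL: because the field itself is a uintptr-typed alias, every update bypasses the write barrier, rather than only the updates that remember to call a helper. A standalone sketch of that contrast, with illustrative names (oldSched, newSched) and simplified types; only the guintptr and setGNoWriteBarrier shapes come from the diff, everything else is assumption.

// Sketch contrasting function-based and type-based write-barrier skipping.
package main

import (
	"fmt"
	"unsafe"
)

type g struct{ goid int64 }

// Old style: the field keeps its pointer type, and callers that must not
// emit a write barrier are expected to go through a helper that performs a
// raw uintptr store. Any ordinary assignment to the same field still gets a
// compiler-inserted write barrier, so the word is sometimes treated as a
// pointer by the GC and sometimes updated behind its back.
type oldSched struct {
	runqhead *g
}

func setGNoWriteBarrier(dst **g, val *g) {
	*(*uintptr)(unsafe.Pointer(dst)) = uintptr(unsafe.Pointer(val))
}

// New style: the field itself is a uintptr alias, so every update is a plain
// integer store and the GC never scans the word. The object must then stay
// reachable through a real pointer elsewhere (allgs in the runtime).
type guintptr uintptr

func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(v *g) { *gp = guintptr(unsafe.Pointer(v)) }

type newSched struct {
	runqhead guintptr
}

func main() {
	gp := &g{goid: 1}

	var o oldSched
	setGNoWriteBarrier(&o.runqhead, gp) // barrier skipped only on this path
	o.runqhead = gp                     // an ordinary pointer store elsewhere would be barriered

	var n newSched
	n.runqhead.set(gp) // always a plain uintptr store
	fmt.Println(o.runqhead.goid, n.runqhead.ptr().goid)
}
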
@@ -224,7 +221,7 @@ type g struct {
 	goid int64
 	waitsince int64 // approx time when the g become blocked
 	waitreason string // if status==gwaiting
-	schedlink *g
+	schedlink guintptr
 	preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
 	paniconfault bool // panic (instead of crash) on unexpected fault address
 	preemptscan bool // preempted g does scan for gc

@@ -263,11 +260,11 @@ type m struct {
 	procid uint64 // for debuggers, but offset not hard-coded
 	gsignal *g // signal-handling g
 	tls [4]uintptr // thread-local storage (for x86 extern register)
-	mstartfn uintptr // TODO: type as func(); note: this is a non-heap allocated func()
+	mstartfn func()
 	curg *g // current running goroutine
-	caughtsig *g // goroutine running during fatal signal
-	p *p // attached p for executing go code (nil if not executing go code)
-	nextp *p
+	caughtsig guintptr // goroutine running during fatal signal
+	p puintptr // attached p for executing go code (nil if not executing go code)
+	nextp puintptr
 	id int32
 	mallocing int32
 	throwing int32

@@ -286,7 +283,7 @@ type m struct {
 	ncgo int32 // number of cgo calls currently in progress
 	park note
 	alllink *m // on allm
-	schedlink *m
+	schedlink muintptr
 	machport uint32 // return address for mach ipc (os x)
 	mcache *mcache
 	lockedg *g

@@ -315,7 +312,7 @@ type m struct {
 	libcall libcall
 	libcallpc uintptr // for cpu profiler
 	libcallsp uintptr
-	libcallg *g
+	libcallg guintptr
 	//#endif
 	//#ifdef GOOS_solaris
 	perrno *int32 // pointer to tls errno

@@ -336,10 +333,10 @@ type p struct {

 	id int32
 	status uint32 // one of pidle/prunning/...
-	link *p
+	link puintptr
 	schedtick uint32 // incremented on every scheduler call
 	syscalltick uint32 // incremented on every system call
-	m *m // back-link to associated m (nil if idle)
+	m muintptr // back-link to associated m (nil if idle)
 	mcache *mcache

 	deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)

@@ -379,19 +376,19 @@ type schedt struct {

 	goidgen uint64

-	midle *m // idle m's waiting for work
+	midle muintptr // idle m's waiting for work
 	nmidle int32 // number of idle m's waiting for work
 	nmidlelocked int32 // number of locked m's waiting for work
 	mcount int32 // number of m's that have been created
 	maxmcount int32 // maximum number of m's allowed (or die)

-	pidle *p // idle p's
+	pidle puintptr // idle p's
 	npidle uint32
 	nmspinning uint32

 	// Global runnable queue.
-	runqhead *g
-	runqtail *g
+	runqhead guintptr
+	runqtail guintptr
 	runqsize int32

 	// Global cache of dead G's.

@@ -100,7 +100,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}

 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()

 	if sig < uint32(len(sigtable)) {

@@ -136,7 +136,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}

 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)

 	if crashing == 0 {
 		startpanic()

@@ -95,7 +95,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}

 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()

 	if sig < uint32(len(sigtable)) {

@@ -108,7 +108,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}

 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()

 	if sig < uint32(len(sigtable)) {

@@ -113,7 +113,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}

 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()

 	if sig < uint32(len(sigtable)) {
@@ -680,7 +680,7 @@ func newstack() {
 	// it needs a lock held by the goroutine), that small preemption turns
 	// into a real deadlock.
 	if preempt {
-		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.status != _Prunning {
+		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
 			// Let the goroutine keep running for now.
 			// gp->preempt is set, so it will be preempted next time.
 			gp.stackguard0 = gp.stack.lo + _StackGuard

@@ -724,7 +724,7 @@ func newstack() {
 		if gp == thisg.m.g0 {
 			throw("runtime: preempt g0")
 		}
-		if thisg.m.p == nil && thisg.m.locks == 0 {
+		if thisg.m.p == 0 && thisg.m.locks == 0 {
 			throw("runtime: g is running but p is not")
 		}
 		if gp.preemptscan {
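
A side effect of typing m.p as puintptr, visible in the hunk above: "no P attached" is now the zero value rather than nil, so the test is written against 0 and field access goes through ptr(). A tiny illustrative sketch with simplified m and p types; not the runtime code.

// Sketch of the zero-value check for a puintptr-typed attached P.
package main

import (
	"fmt"
	"unsafe"
)

type p struct{ status uint32 }

type puintptr uintptr

func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(v *p) { *pp = puintptr(unsafe.Pointer(v)) }

type m struct {
	locks int32
	p     puintptr // attached P; 0 (not nil) means no P
}

func main() {
	var mp m
	if mp.p == 0 && mp.locks == 0 {
		fmt.Println("running without a P") // mirrors the newstack check above
	}
	mp.p.set(&p{status: 1})
	fmt.Println(mp.p.ptr().status)
}
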
@@ -506,7 +506,7 @@ func traceEvent(ev byte, skip int, args ...uint64) {
 // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
 func traceAcquireBuffer() (mp *m, pid int32, bufp **traceBuf) {
 	mp = acquirem()
-	if p := mp.p; p != nil {
+	if p := mp.p.ptr(); p != nil {
 		return mp, p.id, &p.tracebuf
 	}
 	lock(&trace.bufLock)

@@ -732,7 +732,7 @@ func traceProcStop(pp *p) {
 	// to handle this we temporary employ the P.
 	mp := acquirem()
 	oldp := mp.p
-	mp.p = pp
+	mp.p.set(pp)
 	traceEvent(traceEvProcStop, -1)
 	mp.p = oldp
 	releasem(mp)

@@ -806,7 +806,7 @@ func traceGoSysBlock(pp *p) {
 	// to handle this we temporary employ the P.
 	mp := acquirem()
 	oldp := mp.p
-	mp.p = pp
+	mp.p.set(pp)
 	traceEvent(traceEvGoSysBlock, -1)
 	mp.p = oldp
 	releasem(mp)
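
Both trace hunks above follow the same save/install/restore pattern: remember mp.p, attach the borrowed P with set, and put the old value back with a plain assignment, none of which can trigger a write barrier now that the field is a puintptr. A standalone sketch of that pattern, with simplified types and an illustrative helper name (withTemporaryP); not the runtime code.

// Sketch of temporarily installing a P on an M via plain uintptr stores.
package main

import (
	"fmt"
	"unsafe"
)

type p struct{ id int32 }

type puintptr uintptr

func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(v *p) { *pp = puintptr(unsafe.Pointer(v)) }

type m struct {
	p puintptr // currently attached P, if any
}

// withTemporaryP mimics the traceProcStop/traceGoSysBlock shape: remember the
// M's current P, attach pp for the duration of fn, then restore the old value.
// Every store to mp.p is a plain uintptr store, so no write barrier is involved.
func withTemporaryP(mp *m, pp *p, fn func()) {
	oldp := mp.p
	mp.p.set(pp)
	fn()
	mp.p = oldp
}

func main() {
	mp := &m{}
	borrowed := &p{id: 7}
	withTemporaryP(mp, borrowed, func() {
		fmt.Println("inside:", mp.p.ptr().id) // inside: 7
	})
	fmt.Println("after, no P attached:", mp.p == 0) // true
}
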
@@ -528,7 +528,7 @@ func gcallers(gp *g, skip int, pcbuf []uintptr) int {

 func showframe(f *_func, gp *g) bool {
 	g := getg()
-	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig) {
+	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
 		return true
 	}
 	traceback := gotraceback(nil)