Mirror of https://github.com/golang/go.git
runtime: convert schedt.lastpoll to atomic type
Note that this changes the type from uint64 to int64, the type used by
nanotime(). It also adds an atomic load in pollWork(), which used to
use a non-atomic load.

For #53821.

Change-Id: I6173c90f20bfdc0e0a4bc3a7b1c798d1c429fff5
Reviewed-on: https://go-review.googlesource.com/c/go/+/419442
Run-TryBot: Michael Pratt <mpratt@google.com>
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
parent 98e1648823
commit 5d7d50111f

3 changed files with 10 additions and 11 deletions
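The mechanical pattern of the conversion is sketched below: a plain uint64 field that was manipulated through free atomic functions (Load64, Store64, Xchg64, Cas64) with uint64(...) conversions around nanotime() values becomes an atomic.Int64 whose Load/Store/Swap/CompareAndSwap methods work directly on int64. This is an illustrative, self-contained sketch using the public sync/atomic package (Go 1.19+), not the runtime-internal atomic package; the sched variable and nanotime helper here are stand-ins for the runtime's schedt.lastpoll and nanotime().

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// sched mimics the relevant piece of schedt: lastpoll holds the time of the
// last network poll in nanoseconds, or 0 while a poll is in progress.
// (Hypothetical stand-in; the runtime uses runtime/internal/atomic.Int64.)
var sched struct {
	lastpoll atomic.Int64
}

// nanotime stands in for the runtime's nanotime(), which returns int64.
func nanotime() int64 { return time.Now().UnixNano() }

func main() {
	// Old style (uint64 field): atomic.Store64(&sched.lastpoll, uint64(nanotime()))
	// New style: no conversion needed, nanotime() is already int64.
	sched.lastpoll.Store(nanotime())

	// Old: atomic.Load64(&sched.lastpoll) != 0
	if sched.lastpoll.Load() != 0 {
		fmt.Println("a poll has completed recently")
	}

	// Old: atomic.Xchg64(&sched.lastpoll, 0) != 0
	// Claim the poll: swap in 0 and inspect the previous value.
	if prev := sched.lastpoll.Swap(0); prev != 0 {
		fmt.Println("claimed the network poll, last poll at", prev)
	}

	// Old: atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
	now := nanotime()
	sched.lastpoll.CompareAndSwap(0, now)
	fmt.Println("lastpoll is now", sched.lastpoll.Load())
}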
@@ -17,7 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.lastpoll),
 	unsafe.Offsetof(schedt{}.pollUntil),
 	unsafe.Offsetof(schedt{}.timeToRun),
 	unsafe.Offsetof(timeHistogram{}.underflow),
@@ -714,7 +714,7 @@ func schedinit() {
 	gcinit()

 	lock(&sched.lock)
-	sched.lastpoll = uint64(nanotime())
+	sched.lastpoll.Store(nanotime())
 	procs := ncpu
 	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
 		procs = n
@@ -2390,7 +2390,7 @@ func handoffp(pp *p) {
 	}
 	// If this is the last running P and nobody is polling network,
 	// need to wakeup another M to poll network.
-	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
+	if sched.npidle == uint32(gomaxprocs-1) && sched.lastpoll.Load() != 0 {
 		unlock(&sched.lock)
 		startm(pp, false)
 		return
@@ -2632,7 +2632,7 @@ top:
 	// blocked thread (e.g. it has already returned from netpoll, but does
 	// not set lastpoll yet), this thread will do blocking netpoll below
 	// anyway.
-	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
+	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll.Load() != 0 {
 		if list := netpoll(0); !list.empty() { // non-blocking
 			gp := list.pop()
 			injectglist(&list)
@@ -2803,7 +2803,7 @@ top:
 	}

 	// Poll network until next timer.
-	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
+	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
 		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
 		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
@@ -2826,7 +2826,7 @@ top:
 		}
 		list := netpoll(delay) // block until new work is available
 		atomic.Store64(&sched.pollUntil, 0)
-		atomic.Store64(&sched.lastpoll, uint64(now))
+		sched.lastpoll.Store(now)
 		if faketime != 0 && list.empty() {
 			// Using fake time and nothing is ready; stop M.
 			// When all M's stop, checkdead will call timejump.
@@ -2877,7 +2877,7 @@ func pollWork() bool {
 	if !runqempty(p) {
 		return true
 	}
-	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
+	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll.Load() != 0 {
 		if list := netpoll(0); !list.empty() {
 			injectglist(&list)
 			return true
@@ -3066,7 +3066,7 @@ func checkIdleGCNoP() (*p, *g) {
 // going to wake up before the when argument; or it wakes an idle P to service
 // timers and the network poller if there isn't one already.
 func wakeNetPoller(when int64) {
-	if atomic.Load64(&sched.lastpoll) == 0 {
+	if sched.lastpoll.Load() == 0 {
 		// In findrunnable we ensure that when polling the pollUntil
 		// field is either zero or the time to which the current
 		// poll is expected to run. This can have a spurious wakeup
@@ -5200,9 +5200,9 @@ func sysmon() {
 			asmcgocall(*cgo_yield, nil)
 		}
 		// poll network if not polled for more than 10ms
-		lastpoll := int64(atomic.Load64(&sched.lastpoll))
+		lastpoll := sched.lastpoll.Load()
 		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
-			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+			sched.lastpoll.CompareAndSwap(lastpoll, now)
 			list := netpoll(0) // non-blocking - returns list of goroutines
 			if !list.empty() {
 				// Need to decrement number of idle locked M's
@@ -760,7 +760,7 @@ type p struct {
 type schedt struct {
 	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
 	goidgen   atomic.Uint64
-	lastpoll  uint64 // time of last network poll, 0 if currently polling
+	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
 	pollUntil uint64 // time to which current poll is sleeping

 	lock mutex