Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
Revert "sync: yield to the waiter when unlocking a starving mutex"

This reverts CL 200577.

Reason for revert: broke linux-arm64-packet and solaris-amd64-oraclerel builders

Fixes #35424
Updates #33747

Change-Id: I2575fd84d37995d458183caae54704f15d8b8426
Reviewed-on: https://go-review.googlesource.com/c/go/+/205817
Run-TryBot: Bryan C. Mills <bcmills@google.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
This commit is contained in: parent e8f01d591f · commit 73d57bf80f

5 changed files with 2 additions and 125 deletions
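For context on what is being reverted: issue 33747 describes how, under heavy contention, the goroutine releasing a runtime semaphore can keep winning it back before the woken waiter ever runs. The following standalone sketch is not part of this commit — names, durations, and iteration counts are illustrative — but it reproduces that contention pattern at user level with a plain sync.Mutex:

```go
// Illustrative only: a tight lock/unlock loop competing with a single
// waiter. Without some form of handoff, the looping goroutine tends to
// reacquire the mutex it just released. (sync.Mutex's own starvation
// mode mitigates this today; the runtime-internal semaphores behind it
// were the actual target of the reverted change.)
package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var mu sync.Mutex
    acquired := 0 // times the "waiter" got the lock

    done := make(chan struct{})
    go func() { // the hog: releases and immediately relocks
        for {
            select {
            case <-done:
                return
            default:
            }
            mu.Lock()
            time.Sleep(100 * time.Microsecond) // hold briefly
            mu.Unlock()
        }
    }()

    deadline := time.Now().Add(100 * time.Millisecond)
    for time.Now().Before(deadline) {
        mu.Lock() // the waiter: counts successful acquisitions
        acquired++
        mu.Unlock()
    }
    close(done)
    fmt.Println("waiter acquisitions:", acquired)
}
```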
```diff
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -730,11 +730,3 @@ func RunGetgThreadSwitchTest() {
         panic("g1 != g3")
     }
 }
-
-var Semacquire = semacquire
-var Semrelease1 = semrelease1
-
-func SemNwait(addr *uint32) uint32 {
-    root := semroot(addr)
-    return atomic.Load(&root.nwait)
-}
```
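The hunk above removes test-only exports. For readers unfamiliar with the idiom: a `_test.go` file inside the package under test is compiled only during that package's tests, so it can re-export internals to the external `_test` package without widening the public API. A minimal sketch of the pattern, using invented names (`mypkg`, `internal`, `Internal`) and spanning multiple files:

```go
// Sketch of the export_test.go idiom with hypothetical names.
//
// mypkg/mypkg.go:
//     package mypkg
//     func internal() int { return 42 } // unexported
//
// mypkg/export_test.go (this file) compiles only during "go test",
// so the alias below never becomes part of mypkg's public API.
package mypkg

// Re-export for the external test package mypkg_test, exactly as
// runtime/export_test.go re-exports semacquire and semrelease1 above.
var Internal = internal
```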
```diff
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2753,22 +2753,7 @@ func preemptPark(gp *g) {
     casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
     dropg()
     casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
-    schedule()
-}
-
-// goyield is like Gosched, but it:
-// - does not emit a GoSched trace event
-// - puts the current G on the runq of the current P instead of the globrunq
-func goyield() {
-    checkTimeouts()
-    mcall(goyield_m)
-}
-
-func goyield_m(gp *g) {
-    pp := gp.m.p.ptr()
-    casgstatus(gp, _Grunning, _Grunnable)
-    dropg()
-    runqput(pp, gp, false)
     schedule()
 }
```
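goyield had no exported equivalent; the closest public API is runtime.Gosched, which — per the deleted doc comment — differs in that it emits a GoSched trace event and parks the caller on the global run queue rather than the current P's local one. A minimal sketch of cooperative yielding with the public API (purely illustrative; it cannot reproduce goyield's local-runq behavior):

```go
package main

import (
    "fmt"
    "runtime"
)

func main() {
    done := make(chan struct{})
    go func() {
        fmt.Println("worker runs")
        close(done)
    }()
    // Gosched parks the current G on the *global* run queue and runs
    // the scheduler; goyield (removed above) instead used the current
    // P's local queue, making a direct handoff to runnext cheap.
    runtime.Gosched()
    <-done
    fmt.Println("main resumes")
}
```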
```diff
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -180,7 +180,7 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
         atomic.Xadd(&root.nwait, -1)
     }
     unlock(&root.lock)
-    if s != nil { // May be slow or even yield, so unlock first
+    if s != nil { // May be slow, so unlock first
         acquiretime := s.acquiretime
         if acquiretime != 0 {
             mutexevent(t0-acquiretime, 3+skipframes)
@@ -192,25 +192,6 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
             s.ticket = 1
         }
         readyWithTime(s, 5+skipframes)
-        if s.ticket == 1 {
-            // Direct G handoff
-            // readyWithTime has added the waiter G as runnext in the
-            // current P; we now call the scheduler so that we start running
-            // the waiter G immediately.
-            // Note that waiter inherits our time slice: this is desirable
-            // to avoid having a highly contended semaphore hog the P
-            // indefinitely. goyield is like Gosched, but it does not emit a
-            // GoSched trace event and, more importantly, puts the current G
-            // on the local runq instead of the global one.
-            // We only do this in the starving regime (handoff=true), as in
-            // the non-starving case it is possible for a different waiter
-            // to acquire the semaphore while we are yielding/scheduling,
-            // and this would be wasteful. We wait instead to enter starving
-            // regime, and then we start to do direct handoffs of ticket and
-            // P.
-            // See issue 33747 for discussion.
-            goyield()
-        }
     }
 }
```
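The deleted comment describes a direct handoff: the releaser marks the waiter runnable as runnext on its own P and immediately yields, so the waiter inherits the remainder of the time slice and no third party can grab the semaphore in between. Outside the runtime, the same idea is commonly expressed with an unbuffered channel, where a send can only complete by pairing with a waiting receiver. A rough userspace sketch of that handoff moment (illustrative only; `baton` is an invented name, and this is an analogy, not the runtime's mechanism):

```go
package main

import "fmt"

func main() {
    baton := make(chan struct{}) // unbuffered: a send must pair with a receive

    done := make(chan struct{})
    go func() {
        <-baton // the waiter blocks until the owner hands off
        fmt.Println("waiter: received ownership via direct handoff")
        close(done)
    }()

    // Because baton is unbuffered, this send completes only by waking
    // the blocked waiter; ownership passes directly, with no window in
    // which another goroutine could sneak in. This loosely mirrors the
    // s.ticket = 1 ticket handoff that the hunk above removes.
    baton <- struct{}{}
    <-done
}
```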
```diff
--- a/src/runtime/sema_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime_test
-
-import (
-    . "runtime"
-    "sync/atomic"
-    "testing"
-)
-
-// TestSemaHandoff checks that when semrelease+handoff is
-// requested, the G that releases the semaphore yields its
-// P directly to the first waiter in line.
-// See issue 33747 for discussion.
-func TestSemaHandoff(t *testing.T) {
-    const iter = 10000
-    ok := 0
-    for i := 0; i < iter; i++ {
-        if testSemaHandoff() {
-            ok++
-        }
-    }
-    // As long as two thirds of handoffs are direct, we
-    // consider the test successful. The scheduler is
-    // nondeterministic, so this test checks that we get the
-    // desired outcome in a significant majority of cases.
-    // The actual ratio of direct handoffs is much higher
-    // (>90%) but we use a lower threshold to minimize the
-    // chances that unrelated changes in the runtime will
-    // cause the test to fail or become flaky.
-    if ok < iter*2/3 {
-        t.Fatal("direct handoff < 2/3:", ok, iter)
-    }
-}
-
-func TestSemaHandoff1(t *testing.T) {
-    if GOMAXPROCS(-1) <= 1 {
-        t.Skip("GOMAXPROCS <= 1")
-    }
-    defer GOMAXPROCS(GOMAXPROCS(-1))
-    GOMAXPROCS(1)
-    TestSemaHandoff(t)
-}
-
-func TestSemaHandoff2(t *testing.T) {
-    if GOMAXPROCS(-1) <= 2 {
-        t.Skip("GOMAXPROCS <= 2")
-    }
-    defer GOMAXPROCS(GOMAXPROCS(-1))
-    GOMAXPROCS(2)
-    TestSemaHandoff(t)
-}
-
-func testSemaHandoff() bool {
-    var sema, res uint32
-    done := make(chan struct{})
-
-    go func() {
-        Semacquire(&sema)
-        atomic.CompareAndSwapUint32(&res, 0, 1)
-
-        Semrelease1(&sema, true, 0)
-        close(done)
-    }()
-    for SemNwait(&sema) == 0 {
-        Gosched() // wait for goroutine to block in Semacquire
-    }
-
-    // The crux of the test: we release the semaphore with handoff
-    // and immediately perform a CAS both here and in the waiter; we
-    // want the CAS in the waiter to execute first.
-    Semrelease1(&sema, true, 0)
-    atomic.CompareAndSwapUint32(&res, 0, 2)
-
-    <-done // wait for goroutines to finish to avoid data races
-
-    return res == 1 // did the waiter run first?
-}
```
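While this file existed, the suite could be run with something like `go test runtime -run TestSemaHandoff` (command shown for illustration). With the revert, the `Semacquire`/`Semrelease1`/`SemNwait` hooks it depends on are removed from runtime/export_test.go as well — see the first hunk above.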
```diff
--- a/src/sync/mutex.go
+++ b/src/sync/mutex.go
@@ -216,8 +216,7 @@ func (m *Mutex) unlockSlow(new int32) {
             old = m.state
         }
     } else {
-        // Starving mode: handoff mutex ownership to the next waiter, and yield
-        // our time slice so that the next waiter can start to run immediately.
+        // Starving mode: handoff mutex ownership to the next waiter.
         // Note: mutexLocked is not set, the waiter will set it after wakeup.
         // But mutex is still considered locked if mutexStarving is set,
         // so new coming goroutines won't acquire it.
```
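The surviving comment still describes starvation mode, which predates the reverted change: once a waiter has been blocked for more than roughly 1ms, unlockSlow hands ownership to it directly and mutexStarving keeps new arrivals from barging in. That fairness is observable from the public API. A small sketch (timings and iteration counts are illustrative, and results vary by machine) showing that a waiter's lock latency stays bounded even against a goroutine that relocks the instant it unlocks:

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

// Demonstrates sync.Mutex starvation mode at the API level: once a
// waiter has been blocked past the starvation threshold (~1ms), Unlock
// hands ownership to it directly instead of letting newcomers barge in.
func main() {
    var mu sync.Mutex

    go func() { // hog: re-locks the instant it unlocks
        for {
            mu.Lock()
            mu.Unlock()
        }
    }()

    var worst time.Duration
    for i := 0; i < 100; i++ {
        start := time.Now()
        mu.Lock()
        if d := time.Since(start); d > worst {
            worst = d
        }
        mu.Unlock()
        time.Sleep(time.Millisecond) // let the hog build up contention
    }
    fmt.Println("worst waiter latency:", worst)
}
```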