mirror of
https://github.com/golang/go.git
synced 2025-12-08 06:10:04 +00:00
runtime: yield time slice to most recently readied G
Currently, when the runtime ready()s a G, it adds it to the end of the current P's run queue and continues running. If there are many other things in the run queue, this can result in a significant delay before the ready()d G actually runs and can hurt fairness when other Gs in the run queue are CPU hogs. For example, if there are three Gs sharing a P, one of which is a CPU hog that never voluntarily gives up the P and the other two of which are doing small amounts of work and communicating back and forth on an unbuffered channel, the two communicating Gs will get very little CPU time. Change this so that when G1 ready()s G2 and then blocks, the scheduler immediately hands off the remainder of G1's time slice to G2. In the above example, the two communicating Gs will now act as a unit and together get half of the CPU time, while the CPU hog gets the other half of the CPU time. This fixes the problem demonstrated by the ping-pong benchmark added in the previous commit:

    benchmark              old ns/op   new ns/op   delta
    BenchmarkPingPongHog   684287      825         -99.88%

On the x/benchmarks suite, this change improves the performance of garbage by ~6% (for GOMAXPROCS=1 and 4), and json by 28% and 36% for GOMAXPROCS=1 and 4. It has negligible effect on heap size. This has no effect on the go1 benchmark suite since those benchmarks are mostly single-threaded.

Change-Id: I858a08eaa78f702ea98a5fac99d28a4ac91d339f
Reviewed-on: https://go-review.googlesource.com/9289
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
This commit is contained in:
parent
da0e37fa8d
commit
e870f06c3f
3 changed files with 162 additions and 45 deletions
|
|
@ -292,6 +292,57 @@ func main() {
|
|||
}
|
||||
`
|
||||
|
||||
// TestPingPongHog checks scheduler fairness between a CPU-hog pair and a
// lightweight ping-pong pair sharing a single P: when a goroutine readies
// its buddy and then blocks, the buddy should inherit the remainder of the
// time slice, so each pair ends up with roughly half the CPU.
func TestPingPongHog(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	// Pin everything to one P so both pairs must share it.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	// player ping-pongs on ball: each time it receives the ball it bumps
	// *total iters times, then passes the ball back. It exits when done
	// is closed.
	player := func(iters int, total *int, ball chan bool) {
		for {
			select {
			case <-done:
				return

			case <-ball:
				for n := 0; n < iters; n++ {
					*total++
				}
				ball <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	go player(1e6, &hogCount, hogChan)
	go player(1e6, &hogCount, hogChan)

	// Start two co-scheduled light goroutines.
	go player(1e3, &lightCount, lightChan)
	go player(1e3, &lightCount, lightChan)

	// Serve the first ball to each pair, let them run through a few
	// preemption rounds, then shut everything down and reclaim the balls.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 2, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy.
	if hogCount > lightCount*2 || lightCount > hogCount*2 {
		t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}
|
||||
|
||||
func BenchmarkPingPongHog(b *testing.B) {
|
||||
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue