// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"time"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
}
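
// register is defined elsewhere in this test program, not in this file. A
// minimal sketch of the assumed dispatch mechanism (the names cmds and main
// here are illustrative only):
//
//	var cmds = map[string]func(){}
//
//	func register(name string, f func()) { cmds[name] = f }
//
//	func main() { cmds[os.Args[1]]() }
//
// The runtime tests are expected to build this program and run it with a
// command name as its argument (for example, "testprog GCSys"), checking
// that it prints "OK".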

func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated 100 MB or (if not short) 1 GB.
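	// (Concretely, itercount*1029 bytes is roughly 103 MB of total
	// allocation, while the permitted growth in memstats.Sys below is
	// 16<<20 = 16 MiB.)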
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

var sink []byte

func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}
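
// GCFairness checks that busy goroutines do not starve the main goroutine:
// with GOMAXPROCS(1), it starts two goroutines writing to /dev/null in tight
// loops, sleeps briefly, and must still get scheduled to print OK.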
func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test tests what it is intended to test only if writes are fast.
		// If there is no /dev/null, we just don't execute the test.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}
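
// GCFairness2 guards against the goroutine priority elevation described in
// issue #15706: a goroutine in a tight allocation loop blocks in a GC assist,
// the high-priority GC worker satisfies the assist and readies the goroutine
// into runnext, and the goroutine then inherits the rest of the worker's
// scheduler quantum, starving other runnable goroutines. GOMAXPROCS(1) and
// debug.SetGCPercent(1) below make it easy to repeatedly trigger GC and wake
// from a blocked assist.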
func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

var maybeSaved []byte

func GCPhys() {
	// In this test, we construct a very specific scenario. We first
	// allocate N objects and drop half of their pointers on the floor,
	// effectively creating N/2 'holes' in our allocated arenas. We then
	// try to allocate objects twice as big. At the end, we measure the
	// physical memory overhead of large objects.
	//
	// The purpose of this test is to ensure that the GC scavenges free
	// spans eagerly to keep physical memory utilization high even
	// during fragmentation.
	const (
		// Unfortunately, measuring actual used physical pages is
		// difficult because HeapReleased doesn't include the parts
		// of an arena that haven't yet been touched. So, we just
		// make objects and size sufficiently large such that even
		// 64 MB overhead is relatively small in the final
		// calculation.
		//
		// Currently, we target 480MiB worth of memory for our test,
		// computed as size * objects + (size*2) * (objects/2)
		// = 2 * size * objects
		//
		// Size must also be large enough to be considered a large
		// object (not in any size-segregated span).
		size    = 1 << 20
		objects = 240
	)
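
	// With these constants, the first loop below allocates objects*size =
	// 240 MiB and the second loop allocates another (objects/2)*(size*2) =
	// 240 MiB, which is where the 480 MiB figure above comes from. A 1 MiB
	// allocation is also well above the small-object size classes (which
	// top out at 32 KiB), so every object here is a large object.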

	// Save objects which we want to survive, and condemn objects which we don't.
	// Note that we condemn objects in this way and release them all at once in
	// order to avoid having the GC start freeing up these objects while the loop
	// is still running and filling in the holes we intend to make.
	saved := make([][]byte, 0, objects)
	condemned := make([][]byte, 0, objects/2+1)
	for i := 0; i < objects; i++ {
		// Write into a global, to prevent this from being optimized away by
		// the compiler in the future.
		maybeSaved = make([]byte, size)
		if i%2 == 0 {
			saved = append(saved, maybeSaved)
		} else {
			condemned = append(condemned, maybeSaved)
		}
	}
	condemned = nil
	// Clean up the heap. This will free up every other object created above
	// (i.e. everything in condemned), creating holes in the heap.
	runtime.GC()
	// Allocate many new objects of 2x size.
	for i := 0; i < objects/2; i++ {
		saved = append(saved, make([]byte, size*2))
	}
	// Clean up the heap again just to put it in a known state.
	runtime.GC()
	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages)
	// whereas HeapReleased is an estimate of the number of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased
	// If heapBacked exceeds the amount of memory actually used for heap
	// allocated objects by 10% (post-GC HeapAlloc should be quite close to
	// the size of the working set), then fail.
	//
	// In the context of this test, that indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
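	//
	// For a sense of scale: saved ends up holding about 120 one-MiB objects
	// plus 120 two-MiB objects, so post-GC HeapAlloc should be roughly
	// 360 MiB, and the check below tolerates only about 36 MiB of
	// backed-but-unused address space on top of that.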
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	if overuse > 0.1 {
		fmt.Printf("exceeded physical memory overuse threshold of 10%%: %3.2f%%\n"+
			"(alloc: %d, sys: %d, rel: %d, objs: %d)\n", overuse*100, stats.HeapAlloc,
			stats.HeapSys, stats.HeapReleased, len(saved))
		return
	}
	fmt.Println("OK")
	runtime.KeepAlive(saved)
}