// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"math"
	"os"
	"runtime"
	"runtime/debug"
	"runtime/metrics"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
	register("DeferLiveness", DeferLiveness)
	register("GCZombie", GCZombie)
	register("GCMemoryLimit", GCMemoryLimit)
	register("GCMemoryLimitNoGCPercent", GCMemoryLimitNoGCPercent)
}

func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
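	// Record the baseline Sys value; the check below looks at how much it
	// grows over the allocation loop.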
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated 100 MB or (if not short) 1 GB.
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

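// sink keeps the most recent allocation reachable from a global so that the
// allocation in workthegc cannot be optimized away.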
var sink []byte

func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}

func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test tests what it is intended to test only if writes are fast.
		// If there is no /dev/null, we just don't execute the test.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
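	// Start two goroutines that write to /dev/null in a tight loop; the test
	// passes only if the main goroutine still gets scheduled to print OK
	// after the sleep below.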
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}

func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
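	// GOMAXPROCS=1 and a GC percent of 1 make it easy to repeatedly trigger
	// GC and wake goroutines from blocked assists, the scenario behind #15706.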
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]any
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

func GCPhys() {
	// This test ensures that heap-growth scavenging is working as intended.
	//
	// It attempts to construct a sizeable "swiss cheese" heap, with many
	// allocChunk-sized holes. Then, it triggers a heap growth by trying to
	// allocate as much memory as would fit in those holes.
	//
	// The heap growth should cause a large number of those holes to be
	// returned to the OS.

	const (
		// The total amount of memory we're willing to allocate.
		allocTotal = 32 << 20

		// The page cache could hide 64 8-KiB pages from the scavenger today.
		maxPageCache = (8 << 10) * 64
	)

	// How big the allocations are needs to depend on the page size.
	// If the page size is too big and the allocations are too small,
	// they might not be aligned to the physical page size, so the scavenger
	// will gloss over them.
	pageSize := os.Getpagesize()
	var allocChunk int
	if pageSize <= 8<<10 {
		allocChunk = 64 << 10
	} else {
		allocChunk = 512 << 10
	}
	allocs := allocTotal / allocChunk

	// Set GC percent just so this test is a little more consistent in the
	// face of varying environments.
	debug.SetGCPercent(100)

	// Set GOMAXPROCS to 1 to minimize the amount of memory held in the page cache,
	// and to reduce the chance that the background scavenger gets scheduled.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Allocate allocTotal bytes of memory in allocChunk byte chunks.
	// Alternate between whether the chunk will be held live or will be
	// condemned to GC to create holes in the heap.
	saved := make([][]byte, 0, allocs/2+1)
	condemned := make([][]byte, 0, allocs/2)
	for i := 0; i < allocs; i++ {
		b := make([]byte, allocChunk)
		if i%2 == 0 {
			saved = append(saved, b)
		} else {
			condemned = append(condemned, b)
		}
	}

	// Run a GC cycle just so we're at a consistent state.
	runtime.GC()

	// Drop the only reference to all the condemned memory.
	condemned = nil

	// Clear the condemned memory.
	runtime.GC()

	// At this point, the background scavenger is likely running
	// and could pick up the work, so the next line of code doesn't
	// end up doing anything. That's fine. What's important is that
	// this test fails somewhat regularly if the runtime doesn't
	// scavenge on heap growth, and doesn't fail at all otherwise.

	// Make a large allocation that in theory could fit, but won't
	// because we turned the heap into swiss cheese.
	saved = append(saved, make([]byte, allocTotal/2))

	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages)
	// whereas HeapReleased is an estimate of the amount of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	//
	// heapBacked also subtracts out maxPageCache bytes of memory because
	// this is memory that may be hidden from the scavenger per-P. Since
	// GOMAXPROCS=1 here, subtracting it out once is fine.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased - maxPageCache
	// If heapBacked does not exceed the heap goal by more than retainExtraPercent
	// then the scavenger is working as expected; the newly-created holes have been
	// scavenged immediately as part of the allocations which cannot fit in the holes.
	//
	// Since the runtime should scavenge the entirety of the remaining holes,
	// theoretically there should be no more free and unscavenged memory. However due
	// to other allocations that happen during this test we may still see some physical
	// memory over-use.
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	// Check against our overuse threshold, which is what the scavenger always reserves
	// to encourage allocation of memory that doesn't need to be faulted in.
	//
	// Add additional slack in case the page size is large and the scavenger
	// can't reach that memory because it doesn't constitute a complete aligned
	// physical page. Assume the worst case: a full physical page out of each
	// allocation.
	threshold := 0.1 + float64(pageSize)/float64(allocChunk)
	if overuse <= threshold {
		fmt.Println("OK")
		return
	}
	// Physical memory utilization exceeds the threshold, so heap-growth scavenging
	// did not operate as expected.
	//
	// In the context of this test, this indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
	fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+
		"(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100,
		stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved))
	runtime.KeepAlive(saved)
	runtime.KeepAlive(condemned)
}

// Test that defer closure is correctly scanned when the stack is scanned.
func DeferLiveness() {
	var x [10]int
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	x[0] = 42
	runtime.GC()
	runtime.GC()
	runtime.GC()
}

//go:noinline
func escape(x any) { sink2 = x; sink2 = nil }

var sink2 any

// Test zombie object detection and reporting.
func GCZombie() {
	// Allocate several objects of unusual size (so free slots are
	// unlikely to all be re-allocated by the runtime).
	const size = 190
	const count = 8192 / size
	keep := make([]*byte, 0, (count+1)/2)
	free := make([]uintptr, 0, (count+1)/2)
	zombies := make([]*byte, 0, len(free))
	for i := 0; i < count; i++ {
		obj := make([]byte, size)
		p := &obj[0]
		if i%2 == 0 {
			keep = append(keep, p)
		} else {
			free = append(free, uintptr(unsafe.Pointer(p)))
		}
	}

	// Free the unreferenced objects.
	runtime.GC()

	// Bring the free objects back to life.
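	// The addresses in free were stored as uintptrs, so they did not keep the
	// objects alive and the GC above collected them. Converting them back to
	// pointers creates dangling references (zombies) for the GC to find.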
	for _, p := range free {
		zombies = append(zombies, (*byte)(unsafe.Pointer(p)))
	}

	// GC should detect the zombie objects.
	runtime.GC()
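	// If the zombies were detected, the runtime crashes the program before
	// this point; printing here therefore reports a failure.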
println("failed")
|
|
|
|
runtime.KeepAlive(keep)
|
|
|
|
runtime.KeepAlive(zombies)
|
|
|
|
}
|
2022-03-30 22:18:43 +00:00
|
|
|
|
|
|
|
func GCMemoryLimit() {
|
|
|
|
gcMemoryLimit(100)
|
|
|
|
}
|
|
|
|
|
|
|
|
func GCMemoryLimitNoGCPercent() {
|
|
|
|
gcMemoryLimit(-1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test SetMemoryLimit functionality.
|
|
|
|
//
|
|
|
|
// This test lives here instead of runtime/debug because the entire
|
|
|
|
// implementation is in the runtime, and testprog gives us a more
|
|
|
|
// consistent testing environment to help avoid flakiness.
|
|
|
|
func gcMemoryLimit(gcPercent int) {
|
|
|
|
if oldProcs := runtime.GOMAXPROCS(4); oldProcs < 4 {
|
|
|
|
// Fail if the default GOMAXPROCS isn't at least 4.
|
|
|
|
// Whatever invokes this should check and do a proper t.Skip.
|
|
|
|
println("insufficient CPUs")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
debug.SetGCPercent(gcPercent)
|
|
|
|
|
|
|
|
const myLimit = 256 << 20
|
|
|
|
if limit := debug.SetMemoryLimit(-1); limit != math.MaxInt64 {
|
|
|
|
print("expected MaxInt64 limit, got ", limit, " bytes instead\n")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if limit := debug.SetMemoryLimit(myLimit); limit != math.MaxInt64 {
|
|
|
|
print("expected MaxInt64 limit, got ", limit, " bytes instead\n")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if limit := debug.SetMemoryLimit(-1); limit != myLimit {
|
|
|
|
print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
target := make(chan int64)
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
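	// The goroutine below keeps roughly the requested number of bytes live in
	// memLimitSink, polling target for a new size between rounds of allocation.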
	go func() {
		defer wg.Done()

		sinkSize := int(<-target / memLimitUnit)
		for {
			if len(memLimitSink) != sinkSize {
				memLimitSink = make([]*[memLimitUnit]byte, sinkSize)
			}
			for i := 0; i < len(memLimitSink); i++ {
				memLimitSink[i] = new([memLimitUnit]byte)
				// Write to this memory to slow down the allocator, otherwise
				// we get flaky behavior. See #52433.
				for j := range memLimitSink[i] {
					memLimitSink[i][j] = 9
				}
			}
			// Again, Gosched to slow down the allocator.
			runtime.Gosched()
			select {
			case newTarget := <-target:
				if newTarget == math.MaxInt64 {
					return
				}
				sinkSize = int(newTarget / memLimitUnit)
			default:
			}
		}
	}()
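	// Sample total mapped memory and released memory; their difference below
	// approximates how much memory the process is actually retaining.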
	var m [2]metrics.Sample
	m[0].Name = "/memory/classes/total:bytes"
	m[1].Name = "/memory/classes/heap/released:bytes"

	// Don't set this too high, because this is a *live heap* target which
	// is not directly comparable to a total memory limit.
	maxTarget := int64((myLimit / 10) * 8)
	increment := int64((myLimit / 10) * 1)
	for i := increment; i < maxTarget; i += increment {
		target <- i

		// Check to make sure the memory limit is maintained.
		// We're just sampling here so if it transiently goes over we might miss it.
		// The internal accounting is inconsistent anyway, so going over by a few
		// pages is certainly possible. Just make sure we're within some bound.
		// Note that to avoid flakiness due to #52433 (especially since we're allocating
		// somewhat heavily here) this bound is kept loose. In practice the Go runtime
		// should do considerably better than this bound.
		bound := int64(myLimit + 16<<20)
		if runtime.GOOS == "darwin" {
			bound += 16 << 20 // Be more lax on Darwin, see issue 73136.
		}
		start := time.Now()
		for time.Since(start) < 200*time.Millisecond {
			metrics.Read(m[:])
			retained := int64(m[0].Value.Uint64() - m[1].Value.Uint64())
			if retained > bound {
				print("retained=", retained, " limit=", myLimit, " bound=", bound, "\n")
				panic("exceeded memory limit by more than bound allows")
			}
			runtime.Gosched()
		}
	}

	if limit := debug.SetMemoryLimit(math.MaxInt64); limit != myLimit {
		print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n")
		return
	}
	// Stop the allocating goroutine and wait for it to exit.
	target <- math.MaxInt64
	wg.Wait()

	println("OK")
}

// Pick a value close to the page size. We want to m
const memLimitUnit = 8000

var memLimitSink []*[memLimitUnit]byte