runtime: lower mutex contention test expectations
As of https://go.dev/cl/586796, the runtime/metrics view of internal
mutex contention is sampled at 1 per gTrackingPeriod, rather than
either 1 (immediately prior to CL 586796) or the more frequent of
gTrackingPeriod or the mutex profiling rate (Go 1.22). Thus, we no
longer have a real lower bound on the amount of contention that
runtime/metrics will report. Relax the test's expectations again.

For #64253

Change-Id: I94e1d92348a03599a819ec8ac785a0eb3c1ddd73
Reviewed-on: https://go-review.googlesource.com/c/go/+/587515
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Rhys Hiltner <rhys.hiltner@gmail.com>
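The sampling scheme the message describes amounts to recording roughly one in
every gTrackingPeriod contention events. Below is a minimal Go sketch of that
idea, assuming a period of 8 (consistent with the (7/8)^200 figure in the diff
below); recordWait and the use of math/rand/v2 are illustrative stand-ins, not
the runtime's actual code, which uses cheaprandn internally.

    package main

    import (
    	"fmt"
    	"math/rand/v2"
    	"time"
    )

    // Illustration only: gTrackingPeriod is 8 in the runtime, consistent
    // with the (7/8)^200 figure in the test comment in the diff below.
    const gTrackingPeriod = 8

    // recordWait keeps roughly 1 in gTrackingPeriod contention events,
    // scaling each kept sample by the period so the expected total stays
    // unbiased (a design choice of this sketch, not a claim about the
    // runtime's exact weighting).
    func recordWait(wait time.Duration, total *time.Duration) {
    	if rand.IntN(gTrackingPeriod) == 0 { // the runtime uses cheaprandn here
    		*total += wait * gTrackingPeriod
    	}
    }

    func main() {
    	var total time.Duration
    	for i := 0; i < 200; i++ { // 200 events, as the test now creates
    		recordWait(100*time.Microsecond, &total)
    	}
    	// Varies run to run; with probability (7/8)^200 it is even zero.
    	fmt.Println("sampled total:", total)
    }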
commit ac6dea7aa1 (parent b6fa505c4a)

1 changed file with 21 additions and 8 deletions
src/runtime/metrics_test.go

@@ -1036,6 +1036,13 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 		if metricGrowth == 0 && strictTiming {
 			// If the critical section is very short, systems with low timer
 			// resolution may be unable to measure it via nanotime.
+			//
+			// This is sampled at 1 per gTrackingPeriod, but the explicit
+			// runtime.mutex tests create 200 contention events. Observing
+			// zero of those has a probability of (7/8)^200 = 2.5e-12 which
+			// is acceptably low (though the calculation has a tenuous
+			// dependency on cheaprandn being a good-enough source of
+			// entropy).
 			t.Errorf("no increase in /sync/mutex/wait/total:seconds metric")
 		}
 		// This comparison is possible because the time measurements in support of
@@ -1111,7 +1118,7 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 	name := t.Name()
 
 	t.Run("runtime.lock", func(t *testing.T) {
-		mus := make([]runtime.Mutex, 100)
+		mus := make([]runtime.Mutex, 200)
 		var needContention atomic.Int64
 		baseDelay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks
 		fastDelayMicros := baseDelay.Microseconds()
@@ -1207,13 +1214,19 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 		needContention.Store(int64(len(mus) - 1))
 		metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
 
-		if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
-			// The test imposes a delay with usleep, verified with calls to
-			// nanotime. Compare against the runtime/metrics package's view
-			// (based on nanotime) rather than runtime/pprof's view (based
-			// on cputicks).
-			t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
-		}
+		t.Run("metric", func(t *testing.T) {
+			// The runtime/metrics view is sampled at 1 per gTrackingPeriod,
+			// so we don't have a hard lower bound here.
+			testenv.SkipFlaky(t, 64253)
+
+			if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
+				// The test imposes a delay with usleep, verified with calls to
+				// nanotime. Compare against the runtime/metrics package's view
+				// (based on nanotime) rather than runtime/pprof's view (based
+				// on cputicks).
+				t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
+			}
+		})
 		if have, want := n, int64(len(mus)); have != want {
 			t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want)
 		}
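The probability cited in the new comment is easy to double-check. A throwaway
snippet (not part of the change) computing (7/8)^200 directly:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	// Chance that all 200 contention events miss a 1-in-8 sample.
    	p := math.Pow(7.0/8.0, 200)
    	fmt.Printf("(7/8)^200 = %.2g\n", p) // prints 2.5e-12
    }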
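For reference, the metric the test watches for growth is exposed through the
public runtime/metrics API. A standalone sketch of reading it (this uses only
the documented API and is not part of the change):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// /sync/mutex/wait/total:seconds is the metric named in the test's
    	// "no increase" error message above.
    	s := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
    	metrics.Read(s)
    	if s[0].Value.Kind() == metrics.KindFloat64 {
    		fmt.Printf("total mutex wait: %fs\n", s[0].Value.Float64())
    	}
    }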