runtime,runtime/metrics: use explicit histogram boundaries

This change modifies the semantics of runtime/metrics.Float64Histogram.Buckets
to remove the implicit buckets that extend to positive and negative infinity;
instead, all bucket boundaries are listed explicitly. Bucket boundaries remain
the same as before, except that /gc/heap/allocs-by-size:objects and
/gc/heap/frees-by-size:objects no longer have a bucket that extends to negative
infinity. This simplifies the Float64Histogram API, making it both easier to
understand and easier to use.

Also, add a test for allocs-by-size and frees-by-size that checks them
against MemStats.

Fixes #43443.

Change-Id: I5620f15bd084562dadf288f733c4a8cace21910c
Reviewed-on: https://go-review.googlesource.com/c/go/+/281238
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Trust: Michael Knyszek <mknyszek@google.com>
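Under the new semantics, a Float64Histogram with N counts carries N+1 explicit
boundaries, and count i covers the range [Buckets[i], Buckets[i+1]). A minimal
consumer-side sketch (not part of this change), assuming the /gc/pauses:seconds
metric updated below and the exported runtime/metrics API:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Read a single histogram-valued metric.
	s := []metrics.Sample{{Name: "/gc/pauses:seconds"}}
	metrics.Read(s)
	if s[0].Value.Kind() != metrics.KindFloat64Histogram {
		return
	}
	h := s[0].Value.Float64Histogram()
	// With explicit boundaries, len(h.Buckets) == len(h.Counts)+1 and
	// bucket i counts values in [h.Buckets[i], h.Buckets[i+1]).
	for i, c := range h.Counts {
		fmt.Printf("[%g, %g): %d\n", h.Buckets[i], h.Buckets[i+1], c)
	}
}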
commit ae97717133
parent a9ccd2d795

4 changed files with 99 additions and 30 deletions
@@ -41,8 +41,13 @@ func initMetrics() {
 	if metricsInit {
 		return
 	}
-	sizeClassBuckets = make([]float64, _NumSizeClasses)
-	for i := range sizeClassBuckets {
+
+	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+	// Skip size class 0 which is a stand-in for large objects, but large
+	// objects are tracked separately (and they actually get placed in
+	// the last bucket, not the first).
+	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
+	for i := 1; i < _NumSizeClasses; i++ {
 		// Size classes have an inclusive upper-bound
 		// and exclusive lower bound (e.g. 48-byte size class is
 		// (32, 48]) whereas we want and inclusive lower-bound
@@ -56,6 +61,8 @@ func initMetrics() {
 		// boundaries.
 		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
 	}
+	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
+
 	timeHistBuckets = timeHistogramMetricsBuckets()
 	metrics = map[string]metricData{
 		"/gc/cycles/automatic:gc-cycles": {
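The two hunks above build the boundary slice by starting the first bucket at
1 byte, shifting each size class's inclusive upper bound up by one so it
becomes an exclusive boundary, and appending +Inf so the last bucket can hold
large objects. A standalone sketch of the same arithmetic, using a few made-up
size classes in place of the runtime's class_to_size table:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical stand-ins for class_to_size[1:]; the real table has
	// many more size classes.
	classToSize := []float64{8, 16, 24, 32, 48}

	buckets := []float64{1} // the smallest allocation is 1 byte
	for _, size := range classToSize {
		buckets = append(buckets, size+1) // inclusive upper bound -> exclusive boundary
	}
	buckets = append(buckets, math.Inf(1)) // final bucket holds large objects

	fmt.Println(buckets) // [1 9 17 25 33 49 +Inf]
}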
@@ -84,8 +91,10 @@ func initMetrics() {
 			compute: func(in *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(sizeClassBuckets)
 				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
-				for i := range hist.buckets {
-					hist.counts[i] = uint64(in.heapStats.smallAllocCount[i])
+				// Cut off the first index which is ostensibly for size class 0,
+				// but large objects are tracked separately so it's actually unused.
+				for i, count := range in.heapStats.smallAllocCount[1:] {
+					hist.counts[i] = uint64(count)
 				}
 			},
 		},
@@ -94,8 +103,10 @@ func initMetrics() {
 			compute: func(in *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(sizeClassBuckets)
 				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
-				for i := range hist.buckets {
-					hist.counts[i] = uint64(in.heapStats.smallFreeCount[i])
+				// Cut off the first index which is ostensibly for size class 0,
+				// but large objects are tracked separately so it's actually unused.
+				for i, count := range in.heapStats.smallFreeCount[1:] {
+					hist.counts[i] = uint64(count)
 				}
 			},
 		},
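Both compute functions above fill the histogram the same way: the per-size-class
counts are copied in starting at size class 1, and the large-object count lands
in the final, +Inf-bounded bucket. A toy illustration with invented numbers
(smallAllocCount and largeAllocCount here are local stand-ins, not the runtime's
heapStats fields):

package main

import "fmt"

func main() {
	// Index 0 corresponds to size class 0, which is unused because large
	// objects are tracked separately.
	smallAllocCount := []uint64{0, 4, 9, 2, 7, 1}
	largeAllocCount := uint64(3)

	// One count per size class 1..N-1, plus a final bucket for large objects.
	counts := make([]uint64, len(smallAllocCount))
	counts[len(counts)-1] = largeAllocCount
	for i, c := range smallAllocCount[1:] {
		counts[i] = c
	}
	fmt.Println(counts) // [4 9 2 7 1 3]
}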
@@ -116,8 +127,11 @@ func initMetrics() {
 		"/gc/pauses:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(timeHistBuckets)
+				// The bottom-most bucket, containing negative values, is tracked
+				// as a separately as underflow, so fill that in manually and then
+				// iterate over the rest.
 				hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
-				for i := range hist.buckets {
+				for i := range memstats.gcPauseDist.counts {
 					hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
 				}
 			},
@@ -437,8 +451,8 @@ func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogr
 		v.pointer = unsafe.Pointer(hist)
 	}
 	hist.buckets = buckets
-	if len(hist.counts) != len(hist.buckets)+1 {
-		hist.counts = make([]uint64, len(buckets)+1)
+	if len(hist.counts) != len(hist.buckets)-1 {
+		hist.counts = make([]uint64, len(buckets)-1)
 	}
 	return hist
 }
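The last hunk captures the resulting invariant: a histogram's counts slice is
now one element shorter than its buckets slice, since every bucket has an
explicit lower and upper boundary. A quick check of that relationship through
the exported API (a sketch, assuming every histogram-valued metric follows the
new rule):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	descs := metrics.All()
	samples := make([]metrics.Sample, len(descs))
	for i, d := range descs {
		samples[i].Name = d.Name
	}
	metrics.Read(samples)
	for _, s := range samples {
		if s.Value.Kind() != metrics.KindFloat64Histogram {
			continue
		}
		h := s.Value.Float64Histogram()
		// Expect len(Buckets) == len(Counts)+1 for every histogram.
		fmt.Printf("%s: %d boundaries, %d counts\n",
			s.Name, len(h.Buckets), len(h.Counts))
	}
}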