Mirror of https://github.com/golang/go.git
runtime: retype mheap.pagesInUse as atomic.Uint64
[git-generate]
cd src/runtime
mv export_test.go export.go
GOROOT=$(dirname $(dirname $PWD)) rf '
add mheap.pagesInUse \
// Proportional sweep \
// \
// These parameters represent a linear function from gcController.heapLive \
// to page sweep count. The proportional sweep system works to \
// stay in the black by keeping the current page sweep count \
// above this line at the current gcController.heapLive. \
// \
// The line has slope sweepPagesPerByte and passes through a \
// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At \
// any given time, the system is at (gcController.heapLive, \
// pagesSwept) in this space. \
// \
// It is important that the line pass through a point we \
// control rather than simply starting at a 0,0 origin \
// because that lets us adjust sweep pacing at any time while \
// accounting for current progress. If we could only adjust \
// the slope, it would create a discontinuity in debt if any \
// progress has already been made. \
pagesInUse_ atomic.Uint64 // pages of spans in stats mSpanInUse
ex {
import "runtime/internal/atomic"
var t mheap
var v, w uint64
var d int64
t.pagesInUse -> t.pagesInUse_.Load()
t.pagesInUse = v -> t.pagesInUse_.Store(v)
atomic.Load64(&t.pagesInUse) -> t.pagesInUse_.Load()
atomic.LoadAcq64(&t.pagesInUse) -> t.pagesInUse_.LoadAcquire()
atomic.Store64(&t.pagesInUse, v) -> t.pagesInUse_.Store(v)
atomic.StoreRel64(&t.pagesInUse, v) -> t.pagesInUse_.StoreRelease(v)
atomic.Cas64(&t.pagesInUse, v, w) -> t.pagesInUse_.CompareAndSwap(v, w)
atomic.Xchg64(&t.pagesInUse, v) -> t.pagesInUse_.Swap(v)
atomic.Xadd64(&t.pagesInUse, d) -> t.pagesInUse_.Add(d)
}
rm mheap.pagesInUse
mv mheap.pagesInUse_ mheap.pagesInUse
'
mv export.go export_test.go
Change-Id: I495d188683dba0778518563c46755b5ad43be298
Reviewed-on: https://go-review.googlesource.com/c/go/+/356549
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
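The ex block in the script is the heart of the change: every raw atomic call on the old uint64 field is rewritten into a method call on the new atomic.Uint64 wrapper, and the "; updated atomically" comment becomes redundant because the type itself now carries the access discipline. runtime/internal/atomic is not importable outside the runtime, so the minimal sketch below uses the exported sync/atomic.Uint64 (available since Go 1.19) as a stand-in; the mheapOld and mheapNew names are invented for illustration and are not the runtime's types.

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: a plain uint64 field; callers must remember to use the
// atomic free functions, which only a comment can document.
type mheapOld struct {
	pagesInUse uint64 // updated atomically
}

// After: the field is an atomic type, so every access goes through
// its methods and the discipline is enforced by the type.
type mheapNew struct {
	pagesInUse atomic.Uint64
}

func main() {
	var o mheapOld
	atomic.AddUint64(&o.pagesInUse, 8)            // cf. atomic.Xadd64(&h.pagesInUse, 8)
	fmt.Println(atomic.LoadUint64(&o.pagesInUse)) // cf. atomic.Load64(&h.pagesInUse)

	var n mheapNew
	n.pagesInUse.Add(8)              // cf. h.pagesInUse.Add(8)
	fmt.Println(n.pagesInUse.Load()) // cf. h.pagesInUse.Load()
}

The runtime-internal type also offers LoadAcquire and StoreRelease, which the ex rules map from LoadAcq64 and StoreRel64; the exported sync/atomic package has no direct equivalent, since its Load and Store are sequentially consistent.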
Parent: 75b73d68b3
Commit: d419a80bc7

3 changed files with 11 additions and 11 deletions
@@ -96,17 +96,17 @@ type mheap struct {
 	// any given time, the system is at (gcController.heapLive,
 	// pagesSwept) in this space.
 	//
-	// It's important that the line pass through a point we
-	// control rather than simply starting at a (0,0) origin
+	// It is important that the line pass through a point we
+	// control rather than simply starting at a 0,0 origin
 	// because that lets us adjust sweep pacing at any time while
 	// accounting for current progress. If we could only adjust
 	// the slope, it would create a discontinuity in debt if any
 	// progress has already been made.
-	pagesInUse         uint64  // pages of spans in stats mSpanInUse; updated atomically
-	pagesSwept         uint64  // pages swept this cycle; updated atomically
-	pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
-	sweepHeapLiveBasis uint64  // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
-	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
+	pagesInUse         atomic.Uint64 // pages of spans in stats mSpanInUse
+	pagesSwept         uint64        // pages swept this cycle; updated atomically
+	pagesSweptBasis    uint64        // pagesSwept to use as the origin of the sweep ratio; updated atomically
+	sweepHeapLiveBasis uint64        // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
+	sweepPagesPerByte  float64       // proportional sweep ratio; written with lock, read without
 	// TODO(austin): pagesInUse should be a uintptr, but the 386
 	// compiler can't 8-byte align fields.
 
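The comment kept as context in this hunk is the tail of the proportional-sweep description quoted in full in the commit message. As a worked illustration of the line it defines, here is a small self-contained sketch; the sweepTarget helper and all the numbers are invented for this example and are not taken from the runtime's pacing code.

package main

import "fmt"

// sweepTarget is a toy model of the proportional-sweep line: the page
// sweep count the line prescribes once the live heap reaches heapLive.
func sweepTarget(heapLive, sweepHeapLiveBasis, pagesSweptBasis uint64, sweepPagesPerByte float64) float64 {
	return float64(pagesSweptBasis) + sweepPagesPerByte*float64(heapLive-sweepHeapLiveBasis)
}

func main() {
	const (
		sweepHeapLiveBasis = 10 << 20 // basis point: 10 MiB live when pacing was last reset
		pagesSweptBasis    = 100      // pages already swept at that point
		sweepPagesPerByte  = 1e-5     // slope of the line
	)

	heapLive := uint64(12 << 20) // allocation has since grown the live heap to 12 MiB
	pagesSwept := uint64(115)    // pages actually swept so far

	target := sweepTarget(heapLive, sweepHeapLiveBasis, pagesSweptBasis, sweepPagesPerByte)
	fmt.Printf("target %.1f pages, swept %d\n", target, pagesSwept)

	// "Staying in the black": the sweeper owes work whenever the actual
	// count falls below the line at the current heapLive.
	if float64(pagesSwept) < target {
		fmt.Println("behind the line: sweep debt must be paid off")
	} else {
		fmt.Println("ahead of the line: no sweep debt")
	}
}

The sketch also shows why the line passes through (sweepHeapLiveBasis, pagesSweptBasis) rather than the origin: resetting the basis to the current position lets the slope change at any time without instantly creating or erasing debt, exactly as the comment explains.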
@@ -1311,7 +1311,7 @@ HaveSpan:
 		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
 
 		// Update related page sweeper stats.
-		atomic.Xadd64(&h.pagesInUse, int64(npages))
+		h.pagesInUse.Add(int64(npages))
 	}
 
 	// Make sure the newly allocated span will be observed
@@ -1468,7 +1468,7 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
 			throw("mheap.freeSpanLocked - invalid free")
 		}
-		atomic.Xadd64(&h.pagesInUse, -int64(s.npages))
+		h.pagesInUse.Add(-int64(s.npages))
 
 		// Clear in-use bit in arena page bitmap.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
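Both call sites collapse to a single method call, and freeSpanLocked passes a negative delta: the runtime-internal Add takes an int64, as the declaration "var d int64" in the ex block indicates. The exported sync/atomic.Uint64.Add takes a uint64 instead, so a user-level decrement relies on two's-complement wraparound. A minimal sketch follows; the counter name is invented and is not the runtime's field.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var pagesInUse atomic.Uint64 // illustrative counter

	pagesInUse.Add(64) // e.g. a span of 64 pages is allocated

	// Free a span of 16 pages. With a uint64 delta, subtraction is
	// written as adding the two's complement of the amount, the idiom
	// documented for sync/atomic's Add operations.
	n := uint64(16)
	pagesInUse.Add(^(n - 1))

	fmt.Println(pagesInUse.Load()) // 48
}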