runtime: change the span allocation policy to first-fit
This change modifies the treap implementation to be address-ordered instead of size-ordered, and further augments it so it may be used for allocation. It then modifies the find method to implement a first-fit allocation policy.

This change to the treap implementation consequently makes it so that spans are scavenged in highest-address-first order without any additional changes to the scavenging code. Because the treap itself is now address ordered, and the scavenging code iterates over it in reverse, the highest address is now chosen instead of the largest span.

This change also renames the now wrongly-named "scavengeLargest" method on mheap to just "scavengeLocked" and also fixes up logic in that method which made assumptions about size.

For #30333.

Change-Id: I94b6f3209211cc1bfdc8cdaea04152a232cfbbb4
Reviewed-on: https://go-review.googlesource.com/c/go/+/164101
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent e30aa166ea
commit 40036a99a0

5 changed files with 196 additions and 131 deletions
@@ -1121,10 +1121,10 @@ func (h *mheap) pickFreeSpan(npage uintptr) *mspan {
 	// Note that we want the _smaller_ free span, i.e. the free span
 	// closer in size to the amount we requested (npage).
 	var s *mspan
-	if tf.valid() && (!ts.valid() || tf.span().npages <= ts.span().npages) {
+	if tf.valid() && (!ts.valid() || tf.span().base() <= ts.span().base()) {
 		s = tf.span()
 		h.free.erase(tf)
-	} else if ts.valid() && (!tf.valid() || tf.span().npages > ts.span().npages) {
+	} else if ts.valid() && (!tf.valid() || tf.span().base() > ts.span().base()) {
 		s = ts.span()
 		h.scav.erase(ts)
 	}
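For illustration, the new comparison in pickFreeSpan amounts to: of the best candidate from the free treap and the best candidate from the scavenged treap, take whichever one starts at the lower address. The following is a minimal, self-contained Go sketch of that first-fit choice; the span type and the firstFit helper are hypothetical stand-ins, not the runtime's actual treap code.

package main

import "fmt"

// span is a hypothetical stand-in for the runtime's mspan: just a base
// address and a size in pages.
type span struct {
	base   uintptr
	npages uintptr
}

// firstFit mirrors the shape of the new pickFreeSpan logic: given the
// lowest-addressed fitting candidate from the "free" pool and from the
// "scavenged" pool (nil if a pool has no fit), return whichever one
// starts at the lower address.
func firstFit(free, scav *span) *span {
	if free != nil && (scav == nil || free.base <= scav.base) {
		return free
	}
	if scav != nil && (free == nil || free.base > scav.base) {
		return scav
	}
	return nil // neither pool had a large-enough span
}

func main() {
	f := &span{base: 0x4000, npages: 8}
	s := &span{base: 0x2000, npages: 16}
	if picked := firstFit(f, s); picked != nil {
		fmt.Printf("picked span at %#x (%d pages)\n", picked.base, picked.npages)
	}
}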
@@ -1198,10 +1198,10 @@ HaveSpan:
 		// grew the RSS. Mitigate this by scavenging enough free
 		// space to make up for it.
 		//
-		// Also, scavengeLargest may cause coalescing, so prevent
+		// Also, scavenge may cause coalescing, so prevent
 		// coalescing with s by temporarily changing its state.
 		s.state = mSpanManual
-		h.scavengeLargest(s.npages * pageSize)
+		h.scavengeLocked(s.npages * pageSize)
 		s.state = mSpanFree
 	}
 	s.unusedsince = 0
@@ -1236,7 +1236,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	// is proportional to the number of sysUnused() calls rather than
 	// the number of pages released, so we make fewer of those calls
 	// with larger spans.
-	h.scavengeLargest(size)
+	h.scavengeLocked(size)
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -1344,10 +1344,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	h.treapForSpan(s).insert(s)
 }
 
-// scavengeLargest scavenges nbytes worth of spans in unscav
-// starting from the largest span and working down. It then takes those spans
-// and places them in scav. h must be locked.
-func (h *mheap) scavengeLargest(nbytes uintptr) {
+// scavengeLocked scavenges nbytes worth of spans in the free treap by
+// starting from the span with the highest base address and working down.
+// It then takes those spans and places them in scav. h must be locked.
+func (h *mheap) scavengeLocked(nbytes uintptr) {
 	// Use up scavenge credit if there's any available.
 	if nbytes > h.scavengeCredit {
 		nbytes -= h.scavengeCredit
@@ -1356,23 +1356,16 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 		h.scavengeCredit -= nbytes
 		return
 	}
-	// Iterate over the treap backwards (from largest to smallest) scavenging spans
-	// until we've reached our quota of nbytes.
+	// Iterate over the treap backwards (from highest address to lowest address)
+	// scavenging spans until we've reached our quota of nbytes.
 	released := uintptr(0)
 	for t := h.free.end(); released < nbytes && t.valid(); {
 		s := t.span()
 		r := s.scavenge()
 		if r == 0 {
-			// Since we're going in order of largest-to-smallest span, this
-			// means all other spans are no bigger than s. There's a high
-			// chance that the other spans don't even cover a full page,
-			// (though they could) but iterating further just for a handful
-			// of pages probably isn't worth it, so just stop here.
-			//
-			// This check also preserves the invariant that spans that have
-			// `scavenged` set are only ever in the `scav` treap, and
-			// those which have it unset are only in the `free` treap.
-			break
+			// This span doesn't cover at least one physical page, so skip it.
+			t = t.prev()
+			continue
 		}
 		n := t.prev()
 		h.free.erase(t)
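Because the treap is now ordered by address, the reverse iteration in scavengeLocked releases the highest-addressed spans first and stops once the byte quota is met. Below is a rough, self-contained sketch of that pattern; a plain slice sorted by base address stands in for the treap, and the names (span, scavengeUpTo) are hypothetical rather than the runtime's.

package main

import "fmt"

// span is a hypothetical free span: a base address and a size in bytes.
type span struct {
	base uintptr
	size uintptr
}

// scavengeUpTo walks spans from the highest base address to the lowest,
// "releasing" each one, and stops once at least nbytes have been released.
// The slice (sorted by ascending base address) stands in for the
// address-ordered treap; iterating it backwards yields the highest
// addresses first, which is what scavengeLocked now does.
func scavengeUpTo(spans []span, nbytes uintptr) uintptr {
	released := uintptr(0)
	for i := len(spans) - 1; i >= 0 && released < nbytes; i-- {
		// The real code skips spans smaller than a physical page; this
		// sketch simply releases everything it visits.
		released += spans[i].size
		fmt.Printf("scavenged span at %#x (%d bytes)\n", spans[i].base, spans[i].size)
	}
	return released
}

func main() {
	free := []span{ // ascending base address
		{base: 0x1000, size: 4096},
		{base: 0x8000, size: 8192},
		{base: 0x20000, size: 16384},
	}
	fmt.Println("total released:", scavengeUpTo(free, 20000))
}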
@@ -1393,7 +1386,7 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 // scavengeAll visits each node in the unscav treap and scavenges the
 // treapNode's span. It then removes the scavenged span from
 // unscav and adds it into scav before continuing. h must be locked.
-func (h *mheap) scavengeAll(now, limit uint64) uintptr {
+func (h *mheap) scavengeAllLocked(now, limit uint64) uintptr {
 	// Iterate over the treap scavenging spans if unused for at least limit time.
 	released := uintptr(0)
 	for t := h.free.start(); t.valid(); {
@@ -1416,14 +1409,14 @@ func (h *mheap) scavengeAll(now, limit uint64) uintptr {
 	return released
 }
 
-func (h *mheap) scavenge(k int32, now, limit uint64) {
+func (h *mheap) scavengeAll(k int32, now, limit uint64) {
 	// Disallow malloc or panic while holding the heap lock. We do
 	// this here because this is an non-mallocgc entry-point to
 	// the mheap API.
 	gp := getg()
 	gp.m.mallocing++
 	lock(&h.lock)
-	released := h.scavengeAll(now, limit)
+	released := h.scavengeAllLocked(now, limit)
 	unlock(&h.lock)
 	gp.m.mallocing--
 
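The renames above also follow the convention that a method with a "Locked" suffix assumes the caller already holds the heap lock, while the unsuffixed method acquires the lock itself. A tiny, hypothetical sketch of that convention follows; a sync.Mutex stands in for the heap lock and none of these names are the runtime's.

package main

import (
	"fmt"
	"sync"
)

// heap is a toy type illustrating the Locked-suffix convention.
type heap struct {
	mu       sync.Mutex
	freeSize uintptr
}

// scavengeAllLocked does the actual work; the caller must hold h.mu.
func (h *heap) scavengeAllLocked() uintptr {
	released := h.freeSize
	h.freeSize = 0
	return released
}

// scavengeAll is the entry point: it takes the lock, calls the Locked
// variant, and releases the lock. (The runtime version additionally
// disallows malloc and panic around the critical section.)
func (h *heap) scavengeAll() {
	h.mu.Lock()
	released := h.scavengeAllLocked()
	h.mu.Unlock()
	fmt.Println("released", released, "bytes")
}

func main() {
	h := &heap{freeSize: 1 << 20}
	h.scavengeAll()
}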
@@ -1438,7 +1431,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	GC()
-	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
+	systemstack(func() { mheap_.scavengeAll(-1, ^uint64(0), 0) })
 }
 
 // Initialize a new span with the given start and npages.