diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index ddbc872080d..8f6db8eec52 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -945,6 +945,14 @@ func (h *mheap) grow(npage uintptr) bool {
 		return false
 	}
 
+	// Scavenge some pages out of the free treap to make up for
+	// the virtual memory space we just allocated. We prefer to
+	// scavenge the largest spans first since the cost of scavenging
+	// is proportional to the number of sysUnused() calls rather than
+	// the number of pages released, so we make fewer of those calls
+	// with larger spans.
+	h.scavengeLargest(size)
+
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
 	s := (*mspan)(h.spanalloc.alloc())
@@ -1107,6 +1115,46 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	}
 }
 
+// scavengeLargest scavenges nbytes worth of spans in unscav
+// starting from the largest span and working down. It then takes those spans
+// and places them in scav. h must be locked.
+func (h *mheap) scavengeLargest(nbytes uintptr) {
+	// Find the largest child.
+	t := h.free.treap
+	if t == nil {
+		return
+	}
+	for t.right != nil {
+		t = t.right
+	}
+	// Iterate over the treap from the largest child to the smallest by
+	// starting from the largest and finding its predecessor until we've
+	// recovered nbytes worth of physical memory, or it no longer has a
+	// predecessor (meaning the treap is now empty).
+	released := uintptr(0)
+	for t != nil && released < nbytes {
+		s := t.spanKey
+		r := s.scavenge()
+		if r == 0 {
+			// Since we're going in order of largest-to-smallest span, this
+			// means all other spans are no bigger than s. There's a high
+			// chance that the other spans don't even cover a full page
+			// (though they could), but iterating further just for a handful
+			// of pages probably isn't worth it, so just stop here.
+			//
+			// This check also preserves the invariant that spans that have
+			// `scavenged` set are only ever in the `scav` treap, and
+			// those which have it unset are only in the `free` treap.
+			return
+		}
+		prev := t.pred()
+		h.free.removeNode(t)
+		t = prev
+		h.scav.insert(s)
+		released += r
+	}
+}
+
 // scavengeAll visits each node in the unscav treap and scavenges the
 // treapNode's span. It then removes the scavenged span from
 // unscav and adds it into scav before continuing. h must be locked.
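
The policy stated in the grow() comment above (take the largest spans first, because cost scales with the number of sysUnused() calls rather than with pages released) can be illustrated with a small standalone sketch. The helper name releaseLargestFirst, the span sizes, and the byte target below are hypothetical and are not part of this patch; the sketch only models the greedy largest-first selection, not the treap walk or the runtime's data structures.

// Toy model of the largest-first scavenging policy. Hypothetical sizes and
// target; this is not the runtime implementation.
package main

import (
	"fmt"
	"sort"
)

// releaseLargestFirst takes span sizes largest-first until at least target
// bytes are covered, returning the bytes released and the number of release
// calls made (the quantity the policy tries to keep small).
func releaseLargestFirst(spanBytes []uintptr, target uintptr) (released uintptr, calls int) {
	sort.Slice(spanBytes, func(i, j int) bool { return spanBytes[i] > spanBytes[j] })
	for _, n := range spanBytes {
		if released >= target {
			break
		}
		// In the runtime, each step here would correspond to one
		// sysUnused-style call on a span.
		released += n
		calls++
	}
	return released, calls
}

func main() {
	spans := []uintptr{64 << 10, 4 << 20, 8 << 10, 1 << 20, 256 << 10}
	released, calls := releaseLargestFirst(spans, 2<<20)
	fmt.Printf("released %d bytes in %d calls\n", released, calls)
}

With these hypothetical sizes, the 4 MiB span alone covers the 2 MiB target in a single call, whereas a smallest-first order would need several calls, which is the trade-off the added comment in grow() describes.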