runtime: introduce treapForSpan to reduce code duplication

Currently, the treap that a span should be inserted into or removed from
is determined by inspecting the span's properties, and this logic is
repeated in four places. As this logic gets more complex, it makes sense
to de-duplicate it, so introduce treapForSpan, which captures this
logic by returning the appropriate treap for a given span.

For #30333.

Change-Id: I4bd933d93dc50c5fc7c7c7f56ceb95194dcbfbcc
Reviewed-on: https://go-review.googlesource.com/c/go/+/170857
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
This commit is contained in:
Michael Anthony Knyszek 2019-03-29 16:02:05 +00:00 committed by Michael Knyszek
parent d13a9312f5
commit 7b33b6274f

View file

@ -464,11 +464,7 @@ func (h *mheap) coalesce(s *mspan) {
// The size is potentially changing so the treap needs to delete adjacent nodes and // The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node. // insert back as a combined node.
if other.scavenged { h.treapForSpan(other).removeSpan(other)
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
other.state = mSpanDead other.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(other)) h.spanalloc.free(unsafe.Pointer(other))
} }
@ -486,11 +482,8 @@ func (h *mheap) coalesce(s *mspan) {
return return
} }
// Since we're resizing other, we must remove it from the treap. // Since we're resizing other, we must remove it from the treap.
if other.scavenged { h.treapForSpan(other).removeSpan(other)
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
// Round boundary to the nearest physical page size, toward the // Round boundary to the nearest physical page size, toward the
// scavenged span. // scavenged span.
boundary := b.startAddr boundary := b.startAddr
@ -507,11 +500,7 @@ func (h *mheap) coalesce(s *mspan) {
h.setSpan(boundary, b) h.setSpan(boundary, b)
// Re-insert other now that it has a new size. // Re-insert other now that it has a new size.
if other.scavenged { h.treapForSpan(other).insert(other)
h.scav.insert(other)
} else {
h.free.insert(other)
}
} }
// Coalesce with earlier, later spans. // Coalesce with earlier, later spans.
@ -1112,6 +1101,15 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
} }
} }
// treapForSpan selects which of the heap's treaps (scavenged or free)
// a given span belongs to, for use when inserting or removing it.
func (h *mheap) treapForSpan(span *mspan) *mTreap {
	if !span.scavenged {
		return &h.free
	}
	return &h.scav
}
// pickFreeSpan acquires a free span from internal free list // pickFreeSpan acquires a free span from internal free list
// structures if one is available. Otherwise returns nil. // structures if one is available. Otherwise returns nil.
// h must be locked. // h must be locked.
@ -1343,11 +1341,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
h.coalesce(s) h.coalesce(s)
// Insert s into the appropriate treap. // Insert s into the appropriate treap.
if s.scavenged { h.treapForSpan(s).insert(s)
h.scav.insert(s)
} else {
h.free.insert(s)
}
} }
// scavengeLargest scavenges nbytes worth of spans in unscav // scavengeLargest scavenges nbytes worth of spans in unscav