runtime: break up large calls to memclrNoHeapPointers to allow preemption

If something "huge" is allocated and the zeroing is trivial (no pointers
involved), then zero it in chunks in a loop so that preemption can occur,
rather than all in a single non-preemptible call.

Benchmarking suggests that 256K is the best chunk size.
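
For illustration only, a rough sketch of that chunked clear (not necessarily the exact code added by this CL; memclrNoHeapPointers, getg, and goyield are existing runtime internals, while the wrapper name here is hypothetical and the snippet assumes the runtime package context, including its unsafe import):

    // memclrChunked clears size bytes at x in 256 KiB chunks, checking for a
    // pending preemption request between chunks. This is safe only because the
    // memory holds no heap pointers, so the GC never observes a partial clear.
    func memclrChunked(x unsafe.Pointer, size uintptr) {
        const chunkBytes = 256 * 1024 // chunk size suggested by benchmarking
        v := uintptr(x)
        end := v + size
        for off := v; off < end; off += chunkBytes {
            if getg().preempt {
                // A preemption request is pending; yield so the scheduler
                // (e.g. for GC) can run before clearing the next chunk.
                goyield()
            }
            n := end - off
            if n > chunkBytes {
                n = chunkBytes
            }
            memclrNoHeapPointers(unsafe.Pointer(off), n)
        }
    }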

Updates #42642.

Change-Id: I94015e467eaa098c59870e479d6d83bc88efbfb4
Reviewed-on: https://go-review.googlesource.com/c/go/+/270943
Trust: David Chase <drchase@google.com>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
David Chase 2020-11-17 19:54:31 -05:00
parent 41afd3af42
commit 0bbfc5c31e
4 changed files with 51 additions and 7 deletions


@@ -206,7 +206,10 @@ func (c *mcache) refill(spc spanClass) {
 }
 
 // allocLarge allocates a span for a large object.
-func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
+// The boolean result indicates whether the span is known-zeroed.
+// If it did not need to be zeroed, it may not have been zeroed;
+// but if it came directly from the OS, it is already zeroed.
+func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) (*mspan, bool) {
 	if size+_PageSize < size {
 		throw("out of memory")
 	}
@@ -221,7 +224,7 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
 	deductSweepCredit(npages*_PageSize, npages)
 
 	spc := makeSpanClass(0, noscan)
-	s := mheap_.alloc(npages, spc, needzero)
+	s, isZeroed := mheap_.alloc(npages, spc, needzero)
 	if s == nil {
 		throw("out of memory")
 	}
@@ -245,7 +248,7 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
 	s.limit = s.base() + size
 	heapBitsForAddr(s.base()).initSpan(s)
-	return s
+	return s, isZeroed
 }
 
 func (c *mcache) releaseAll() {
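
For context, a hedged sketch of how a caller such as mallocgc's large-object path can use the new boolean result: zeroing is skipped when the allocator already guarantees a zeroed span, and otherwise done with the preemptible chunked clear sketched above (memclrChunked is the hypothetical helper from that sketch; the actual malloc.go code may differ):

    // Allocate the large span; isZeroed reports whether it is known-zeroed.
    span, isZeroed := c.allocLarge(size, needzero, noscan)
    x := unsafe.Pointer(span.base())
    if needzero && !isZeroed {
        // The span may hold stale data and the allocator did not zero it;
        // clear it in preemptible chunks rather than one huge call.
        memclrChunked(x, size)
    }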