runtime: prepare arenas for use incrementally
This change moves the call of sysMap from (*mheap).sysAlloc into (*mheap).grow, so we only sysMap what we're going to use in the near future (thanks to the curArena mechanism). The purpose of this change is to better support systems with strict overcommit rules, which generally accept reserved memory but not prepared memory (see malloc.go for exact descriptions of these states).

This move requires changing linearAlloc to only optionally map memory. In one case, with mheap.heapArenaAlloc, we do want it to map memory. But now in the other case, with mheap.arena, we don't, because we want grow to take care of it.

The risk with this change is that we may make more syscalls than before on systems with 64 MiB arenas, but because heap growth is relatively rare this is unlikely to be a noticeable issue. We also bound the number of syscalls made by only extending curArena (and thus mapping) by pallocChunkPages*pageSize, which is 4 MiB.

Fixes #42612.

Change-Id: I736df696afe78ddb1a747a896caa0db8726027e5
Reviewed-on: https://go-review.googlesource.com/c/go/+/270537
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Parent: a9cfd55e2b
Commit: bd6aeca968

3 changed files with 49 additions and 27 deletions
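For readers unfamiliar with the Reserved/Prepared/Ready terminology the commit message uses, the sketch below is a standalone, Linux-only illustration (not runtime code; the runtime's sysReserve/sysMap differ in detail) of why deferring the mapping matters under strict overcommit: a PROT_NONE reservation is generally not charged against the commit limit, while making pages writable is accounted immediately, so committing in small steps keeps the charged footprint close to what is actually used. The 64 MiB and 4 MiB figures are taken from the commit message, not from the runtime.

// overcommit_demo.go: standalone illustration only (Linux).
package main

import (
	"fmt"
	"syscall"
)

func main() {
	const reserveSize = 64 << 20 // reserve 64 MiB of address space up front
	const step = 4 << 20         // commit it in 4 MiB increments, as needed

	// "Reserved": address space only, inaccessible, and (under strict
	// overcommit) generally not charged against the commit limit.
	mem, err := syscall.Mmap(-1, 0, reserveSize,
		syscall.PROT_NONE, syscall.MAP_PRIVATE|syscall.MAP_ANON)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(mem)

	// "Prepared"/"Ready": commit only the next step before touching it.
	if err := syscall.Mprotect(mem[:step], syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
		panic(err)
	}
	mem[0] = 1 // safe: this byte lies inside the committed step
	fmt.Printf("reserved %d MiB, committed %d MiB\n", reserveSize>>20, step>>20)
}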
src/runtime/malloc.go

@@ -568,7 +568,7 @@ func mallocinit() {
 		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
 		meta := uintptr(sysReserve(nil, arenaMetaSize))
 		if meta != 0 {
-			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
+			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
 		}
 
 		// We want to start the arena low, but if we're linked
@@ -605,7 +605,7 @@ func mallocinit() {
 		for _, arenaSize := range arenaSizes {
 			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
 			if a != nil {
-				mheap_.arena.init(uintptr(a), size)
+				mheap_.arena.init(uintptr(a), size, false)
 				p = mheap_.arena.end // For hint below
 				break
 			}
@@ -622,8 +622,8 @@ func mallocinit() {
 // heapArenaBytes. sysAlloc returns nil on failure.
 // There is no corresponding free function.
 //
-// sysAlloc returns a memory region in the Prepared state. This region must
-// be transitioned to Ready before use.
+// sysAlloc returns a memory region in the Reserved state. This region must
+// be transitioned to Prepared and then Ready before use.
 //
 // h must be locked.
 func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
@@ -725,9 +725,6 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		throw("misrounded allocation in sysAlloc")
 	}
 
-	// Transition from Reserved to Prepared.
-	sysMap(v, size, &memstats.heap_sys)
-
 mapped:
 	// Create arena metadata.
 	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
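The sysMap call deleted above is the heart of the change: per the commit message it moves into (*mheap).grow (whose hunks are not among those captured here), which extends curArena and maps only what it is about to hand out, in multiples of pallocChunkPages*pageSize (4 MiB). The following is a rough, self-contained sketch of that shape under assumed names: arena, growToy, and osMap are hypothetical stand-ins for curArena, (*mheap).grow, and sysMap, and the sketch works in bytes rather than pages.

// grow_sketch.go: hypothetical sketch, not the runtime's implementation.
package main

import "fmt"

const chunkSize = 4 << 20 // stands in for pallocChunkPages*pageSize (4 MiB)

// arena is a stand-in for mheap.curArena: a reserved range [base, end) that
// has not yet been mapped.
type arena struct {
	base, end uintptr
}

// osMap stands in for sysMap: transition a reserved range toward use.
func osMap(addr, size uintptr) {
	fmt.Printf("sysMap(%#x, %d MiB)\n", addr, size>>20)
}

// growToy sketches the post-commit shape of (*mheap).grow: round the request
// up to whole chunks, carve that much out of the current arena, and map only
// the carved-out portion, rather than mapping entire arenas in sysAlloc.
func growToy(a *arena, need uintptr) bool {
	ask := alignUp(need, chunkSize)
	if ask > a.end-a.base {
		// The real code would ask sysAlloc for more reserved space here.
		return false
	}
	v := a.base
	a.base += ask
	osMap(v, ask) // only what we are about to use gets mapped
	return true
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
	a := &arena{base: 0x40000000, end: 0x40000000 + 64<<20}
	growToy(a, 24<<10) // one 4 MiB mapping covers a small request
	growToy(a, 6<<20)  // an 8 MiB mapping for a 6 MiB request
}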
@@ -1400,15 +1397,19 @@ func inPersistentAlloc(p uintptr) bool {
 }
 
 // linearAlloc is a simple linear allocator that pre-reserves a region
-// of memory and then maps that region into the Ready state as needed. The
-// caller is responsible for locking.
+// of memory and then optionally maps that region into the Ready state
+// as needed.
+//
+// The caller is responsible for locking.
 type linearAlloc struct {
 	next   uintptr // next free byte
 	mapped uintptr // one byte past end of mapped space
 	end    uintptr // end of reserved space
+
+	mapMemory bool // transition memory from Reserved to Ready if true
 }
 
-func (l *linearAlloc) init(base, size uintptr) {
+func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
 	if base+size < base {
 		// Chop off the last byte. The runtime isn't prepared
 		// to deal with situations where the bounds could overflow.
@@ -1418,6 +1419,7 @@ func (l *linearAlloc) init(base, size uintptr) {
 	}
 	l.next, l.mapped = base, base
 	l.end = base + size
+	l.mapMemory = mapMemory
 }
 
 func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
@@ -1427,9 +1429,11 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Poi
 	}
 	l.next = p + size
 	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
-		// Transition from Reserved to Prepared to Ready.
-		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
-		sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+		if l.mapMemory {
+			// Transition from Reserved to Prepared to Ready.
+			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
+			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+		}
 		l.mapped = pEnd
 	}
 	return unsafe.Pointer(p)
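The design choice behind the new flag follows from the two call sites earlier in this diff: heapArenaAlloc passes true because arena metadata is used as soon as it is allocated, while mheap.arena passes false so that grow performs the mapping removed from the sysAlloc hunk above. To see the flag's effect in isolation, here is a runnable toy modeled on the patched linearAlloc, with sysMap/sysUsed replaced by prints; the type name, page size, and addresses are illustrative, not the runtime's.

// linearalloc_toy.go: illustrative model of the patched linearAlloc, not the real type.
package main

import "fmt"

const physPageSize = 4 << 10 // assumed physical page size for the toy

type toyLinearAlloc struct {
	next      uintptr // next free byte
	mapped    uintptr // one byte past end of mapped space
	end       uintptr // end of reserved space
	mapMemory bool    // transition memory from Reserved to Ready if true
}

func (l *toyLinearAlloc) init(base, size uintptr, mapMemory bool) {
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *toyLinearAlloc) alloc(size, align uintptr) uintptr {
	p := alignUp(l.next, align)
	if size > l.end-p {
		return 0
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Where the real code calls sysMap and sysUsed.
			fmt.Printf("map [%#x, %#x)\n", l.mapped, pEnd)
		}
		l.mapped = pEnd
	}
	return p
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
	var eager, lazy toyLinearAlloc
	eager.init(0x1000000, 1<<20, true) // like mheap_.heapArenaAlloc in this commit
	lazy.init(0x2000000, 1<<20, false) // like mheap_.arena: grow maps it later
	eager.alloc(64<<10, 8)             // prints a map transition
	lazy.alloc(64<<10, 8)              // only advances next/mapped, no mapping
}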