go/src/runtime/mem_bsd.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || netbsd || openbsd || solaris

package runtime

import (
	"unsafe"
)
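
// sysAllocOS obtains n bytes of zeroed memory from the OS via an anonymous,
// private mapping. It returns nil if the mapping fails.
//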
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
	v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
	if err != 0 {
		return nil
	}
	return v
}
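
// sysUnusedOS advises the kernel that the pages in [v, v+n) are no longer
// needed and may be reclaimed, using MADV_DONTNEED when the madvdontneed
// debug setting is enabled and MADV_FREE otherwise.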
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
	if debug.madvdontneed != 0 {
		madvise(v, n, _MADV_DONTNEED)
	} else {
		madvise(v, n, _MADV_FREE)
	}
}
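
// sysUsedOS is a no-op on these platforms: pages released with madvise
// become usable again simply by touching them.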
func sysUsedOS(v unsafe.Pointer, n uintptr) {
}
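
// sysHugePageOS is a no-op; huge page hints are not managed on these platforms.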
func sysHugePageOS(v unsafe.Pointer, n uintptr) {
}
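
// sysNoHugePageOS is likewise a no-op.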
func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
}
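
// sysHugePageCollapseOS is a no-op; the runtime only performs forced huge
// page collapse (MADV_COLLAPSE) on Linux.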
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
}
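
// sysFreeOS unconditionally returns the memory region [v, v+n) to the OS.
//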
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
	munmap(v, n)
}
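
// sysFaultOS remaps the region [v, v+n) with PROT_NONE so that any access
// faults; it is used when debugging the runtime.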
func sysFaultOS(v unsafe.Pointer, n uintptr) {
	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}

// Indicates not to reserve swap space for the mapping.
const _sunosMAP_NORESERVE = 0x40
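
// sysReserveOS reserves n bytes of address space at v (a hint) without
// committing memory, by mapping the region PROT_NONE. It returns nil if the
// mapping fails.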
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
	flags := int32(_MAP_ANON | _MAP_PRIVATE)
	if GOOS == "solaris" || GOOS == "illumos" {
		// Be explicit that we don't want to reserve swap space
		// for PROT_NONE anonymous mappings. This avoids an issue
		// wherein large mappings can cause fork to fail.
		flags |= _sunosMAP_NORESERVE
	}
	p, err := mmap(v, n, _PROT_NONE, flags, -1, 0)
	if err != 0 {
		return nil
	}
	return p
}

const _sunosEAGAIN = 11
const _ENOMEM = 12
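
// sysMapOS remaps the previously reserved region [v, v+n) as readable and
// writable. It throws if the OS is out of memory or if the mapping cannot be
// placed exactly at v.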
func sysMapOS(v unsafe.Pointer, n uintptr) {
	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
	if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {
		throw("runtime: out of memory")
	}
	if p != v || err != 0 {
		print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
		throw("runtime: cannot map pages in arena address space")
	}
}