Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
runtime: decorate anonymous memory mappings

Leverage the prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) API to name the
runtime's anonymous memory areas. This API was introduced in Linux 5.17 to
decorate the anonymous memory areas shown in /proc/<pid>/maps.

It is already used by glibc. See:

* https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=27dfd1eb907f4615b70c70237c42c552bb4f26a8;hb=HEAD#l2434
* https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/setvmaname.c;h=ea93a5ffbebc9e5a7e32a297138f465724b4725f;hb=HEAD#l63

This can be useful when investigating the memory consumption of a
multi-language program. In a 100% Go program, the pprof profiler can be used
to profile the memory consumption of the program, but pprof is only aware of
what happens within the Go world. In a multi-language program, there can be
doubt about whether suspicious extra memory consumption comes from the Go
part or the native part.

With this change, the following Go program:

package main

import (
	"fmt"
	"log"
	"os"
)

/*
#include <stdlib.h>
void f(void) { (void)malloc(1024*1024*1024); }
*/
import "C"

func main() {
	C.f()
	data, err := os.ReadFile("/proc/self/maps")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}

produces this output:

$ GLIBC_TUNABLES=glibc.mem.decorate_maps=1 ~/doc/devel/open-source/go/bin/go run .
00400000-00402000 r--p 00000000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
00402000-004a4000 r-xp 00002000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
004a4000-00574000 r--p 000a4000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
00574000-00575000 r--p 00173000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
00575000-00580000 rw-p 00174000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
00580000-005a4000 rw-p 00000000 00:00 0
2e075000-2e096000 rw-p 00000000 00:00 0 [heap]
c000000000-c000400000 rw-p 00000000 00:00 0 [anon: Go: heap]
c000400000-c004000000 ---p 00000000 00:00 0 [anon: Go: heap reservation]
777f40000000-777f40021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
777f40021000-777f44000000 ---p 00000000 00:00 0
777f44000000-777f44021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
777f44021000-777f48000000 ---p 00000000 00:00 0
777f48000000-777f48021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
777f48021000-777f4c000000 ---p 00000000 00:00 0
777f4c000000-777f4c021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
777f4c021000-777f50000000 ---p 00000000 00:00 0
777f50000000-777f50021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
777f50021000-777f54000000 ---p 00000000 00:00 0
777f55afb000-777f55afc000 ---p 00000000 00:00 0
777f55afc000-777f562fc000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216378]
777f562fc000-777f562fd000 ---p 00000000 00:00 0
777f562fd000-777f56afd000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216377]
777f56afd000-777f56afe000 ---p 00000000 00:00 0
777f56afe000-777f572fe000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216376]
777f572fe000-777f572ff000 ---p 00000000 00:00 0
777f572ff000-777f57aff000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216375]
777f57aff000-777f57b00000 ---p 00000000 00:00 0
777f57b00000-777f58300000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216374]
777f58300000-777f58400000 rw-p 00000000 00:00 0 [anon: Go: page alloc index]
777f58400000-777f5a400000 rw-p 00000000 00:00 0 [anon: Go: heap index]
777f5a400000-777f6a580000 ---p 00000000 00:00 0 [anon: Go: scavenge index]
777f6a580000-777f6a581000 rw-p 00000000 00:00 0 [anon: Go: scavenge index]
777f6a581000-777f7a400000 ---p 00000000 00:00 0 [anon: Go: scavenge index]
777f7a400000-777f8a580000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f8a580000-777f8a581000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
777f8a581000-777f9c430000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f9c430000-777f9c431000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
777f9c431000-777f9e806000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f9e806000-777f9e807000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
777f9e807000-777f9ec00000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f9ec36000-777f9ecb6000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
777f9ecb6000-777f9ecc6000 rw-p 00000000 00:00 0 [anon: Go: gc bits]
777f9ecc6000-777f9ecd6000 rw-p 00000000 00:00 0 [anon: Go: allspans array]
777f9ecd6000-777f9ece7000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
777f9ece7000-777f9ed67000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f9ed67000-777f9ed68000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
777f9ed68000-777f9ede7000 ---p 00000000 00:00 0 [anon: Go: page summary]
777f9ede7000-777f9ee07000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
777f9ee07000-777f9ee0a000 rw-p 00000000 00:00 0 [anon: glibc: loader malloc]
777f9ee0a000-777f9ee2e000 r--p 00000000 00:21 48158213 /usr/lib/libc.so.6
777f9ee2e000-777f9ef9f000 r-xp 00024000 00:21 48158213 /usr/lib/libc.so.6
777f9ef9f000-777f9efee000 r--p 00195000 00:21 48158213 /usr/lib/libc.so.6
777f9efee000-777f9eff2000 r--p 001e3000 00:21 48158213 /usr/lib/libc.so.6
777f9eff2000-777f9eff4000 rw-p 001e7000 00:21 48158213 /usr/lib/libc.so.6
777f9eff4000-777f9effc000 rw-p 00000000 00:00 0
777f9effc000-777f9effe000 rw-p 00000000 00:00 0 [anon: glibc: loader malloc]
777f9f00a000-777f9f04a000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
777f9f04a000-777f9f04c000 r--p 00000000 00:00 0 [vvar]
777f9f04c000-777f9f04e000 r--p 00000000 00:00 0 [vvar_vclock]
777f9f04e000-777f9f050000 r-xp 00000000 00:00 0 [vdso]
777f9f050000-777f9f051000 r--p 00000000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
777f9f051000-777f9f07a000 r-xp 00001000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
777f9f07a000-777f9f085000 r--p 0002a000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
777f9f085000-777f9f087000 r--p 00034000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
777f9f087000-777f9f088000 rw-p 00036000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
777f9f088000-777f9f089000 rw-p 00000000 00:00 0
7ffc7bfa7000-7ffc7bfc8000 rw-p 00000000 00:00 0 [stack]
ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]

The anonymous memory areas are now labelled, so we can see which ones were
allocated by the Go runtime and which ones were allocated by glibc.

Fixes #71546

Change-Id: I304e8b4dd7f2477a6da794fd44e9a7a5354e4bf4
Reviewed-on: https://go-review.googlesource.com/c/go/+/646095
Auto-Submit: Alan Donovan <adonovan@google.com>
Commit-Queue: Alan Donovan <adonovan@google.com>
Reviewed-by: Felix Geisendörfer <felix.geisendoerfer@datadoghq.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>

This commit is contained in:
parent f95941de49
commit 52eaed6633

36 changed files with 130 additions and 62 deletions
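
For readers who want to try the kernel API the commit message describes, here is a minimal sketch (an editorial illustration, not part of this CL): it mmaps an anonymous region and names it with prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...). The prSetVMA and prSetVMAAnonName constants and the label text are defined locally for the example; on kernels older than Linux 5.17 the prctl fails with EINVAL and the mapping is simply left unnamed.

// Editor's sketch (assumptions: Linux only, constants spelled out locally).
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

const (
	prSetVMA         = 0x53564d41 // PR_SET_VMA
	prSetVMAAnonName = 0          // PR_SET_VMA_ANON_NAME
)

func main() {
	// Map 1 MiB of private anonymous memory.
	mem, err := syscall.Mmap(-1, 0, 1<<20,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}

	// Attach a label; the kernel copies the NUL-terminated name.
	name := []byte("example: scratch buffer\x00")
	_, _, errno := syscall.Syscall6(syscall.SYS_PRCTL,
		prSetVMA, prSetVMAAnonName,
		uintptr(unsafe.Pointer(&mem[0])), uintptr(len(mem)),
		uintptr(unsafe.Pointer(&name[0])), 0)
	if errno != 0 {
		fmt.Fprintln(os.Stderr, "prctl:", errno, "(kernel < 5.17?)")
	}

	// Print /proc/self/maps so the labelled region is visible.
	maps, _ := os.ReadFile("/proc/self/maps")
	os.Stdout.Write(maps)
}

On a 5.17+ kernel this should show the region as "[anon: example: scratch buffer]" in the printed maps, alongside the "Go: ..." labels that the runtime change below applies to its own mappings via setVMAName.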
@@ -16,4 +16,7 @@ const (
 EPOLL_CTL_DEL = 0x2
 EPOLL_CTL_MOD = 0x3
 EFD_CLOEXEC = 0x80000
+
+PR_SET_VMA = 0x53564d41
+PR_SET_VMA_ANON_NAME = 0
 )

@@ -7,6 +7,7 @@ package syscall
 const (
 SYS_FCNTL = 55
 SYS_MPROTECT = 125
+SYS_PRCTL = 172
 SYS_EPOLL_CTL = 255
 SYS_EPOLL_PWAIT = 319
 SYS_EPOLL_CREATE1 = 329

@@ -7,6 +7,7 @@ package syscall
 const (
 SYS_MPROTECT = 10
 SYS_FCNTL = 72
+SYS_PRCTL = 157
 SYS_EPOLL_CTL = 233
 SYS_EPOLL_PWAIT = 281
 SYS_EPOLL_CREATE1 = 291

@@ -7,6 +7,7 @@ package syscall
 const (
 SYS_FCNTL = 55
 SYS_MPROTECT = 125
+SYS_PRCTL = 172
 SYS_EPOLL_CTL = 251
 SYS_EPOLL_PWAIT = 346
 SYS_EPOLL_CREATE1 = 357

@@ -9,6 +9,7 @@ const (
 SYS_EPOLL_CTL = 21
 SYS_EPOLL_PWAIT = 22
 SYS_FCNTL = 25
+SYS_PRCTL = 167
 SYS_MPROTECT = 226
 SYS_EPOLL_PWAIT2 = 441
 SYS_EVENTFD2 = 19

@@ -9,6 +9,7 @@ const (
 SYS_EPOLL_CTL = 21
 SYS_EPOLL_PWAIT = 22
 SYS_FCNTL = 25
+SYS_PRCTL = 167
 SYS_MPROTECT = 226
 SYS_EPOLL_PWAIT2 = 441
 SYS_EVENTFD2 = 19

@@ -9,6 +9,7 @@ package syscall
 const (
 SYS_MPROTECT = 5010
 SYS_FCNTL = 5070
+SYS_PRCTL = 5153
 SYS_EPOLL_CTL = 5208
 SYS_EPOLL_PWAIT = 5272
 SYS_EPOLL_CREATE1 = 5285

@@ -9,6 +9,7 @@ package syscall
 const (
 SYS_FCNTL = 4055
 SYS_MPROTECT = 4125
+SYS_PRCTL = 4192
 SYS_EPOLL_CTL = 4249
 SYS_EPOLL_PWAIT = 4313
 SYS_EPOLL_CREATE1 = 4326

@@ -9,6 +9,7 @@ package syscall
 const (
 SYS_FCNTL = 55
 SYS_MPROTECT = 125
+SYS_PRCTL = 171
 SYS_EPOLL_CTL = 237
 SYS_EPOLL_PWAIT = 303
 SYS_EPOLL_CREATE1 = 315

@@ -9,6 +9,7 @@ const (
 SYS_EPOLL_CTL = 21
 SYS_EPOLL_PWAIT = 22
 SYS_FCNTL = 25
+SYS_PRCTL = 167
 SYS_MPROTECT = 226
 SYS_EPOLL_PWAIT2 = 441
 SYS_EVENTFD2 = 19

@@ -7,6 +7,7 @@ package syscall
 const (
 SYS_FCNTL = 55
 SYS_MPROTECT = 125
+SYS_PRCTL = 172
 SYS_EPOLL_CTL = 250
 SYS_EPOLL_PWAIT = 312
 SYS_EPOLL_CREATE1 = 327

@@ -1041,7 +1041,7 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 //
 // Unlike (*mheap).grow, just map in everything that we
 // asked for. We're likely going to use it all.
-sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
+sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased, "user arena chunk")
 sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)

 // Model the user arena as a heap span for a large object.

@@ -95,7 +95,7 @@ func dlogImpl() *dloggerImpl {
 if l == nil {
 // Use sysAllocOS instead of sysAlloc because we want to interfere
 // with the runtime as little as possible, and sysAlloc updates accounting.
-l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{})))
+l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
 if l == nil {
 throw("failed to allocate debug log")
 }

@@ -774,7 +774,7 @@ func printDebugLogImpl() {
 }
 // Use sysAllocOS instead of sysAlloc because we want to interfere
 // with the runtime as little as possible, and sysAlloc updates accounting.
-state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
+state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
 if state1 == nil {
 println("failed to allocate read state for", n, "logs")
 printunlock()

@@ -536,7 +536,7 @@ func MapNextArenaHint() (start, end uintptr, ok bool) {
 } else {
 start, end = addr, addr+heapArenaBytes
 }
-got := sysReserve(unsafe.Pointer(addr), physPageSize)
+got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
 ok = (addr == uintptr(got))
 if !ok {
 // We were unable to get the requested reservation.

@@ -727,7 +727,7 @@ func makeheapobjbv(p uintptr, size uintptr) bitvector {
 sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
 }
 n := nptr/8 + 1
-p := sysAlloc(n, &memstats.other_sys)
+p := sysAlloc(n, &memstats.other_sys, "heapdump")
 if p == nil {
 throw("heapdump: out of memory")
 }

@@ -570,7 +570,7 @@ func mallocinit() {
 // heap reservation.

 const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
-meta := uintptr(sysReserve(nil, arenaMetaSize))
+meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
 if meta != 0 {
 mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
 }

@@ -607,7 +607,7 @@ func mallocinit() {
 128 << 20,
 }
 for _, arenaSize := range arenaSizes {
-a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
+a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
 if a != nil {
 mheap_.arena.init(uintptr(a), size, false)
 p = mheap_.arena.end // For hint below

@@ -657,7 +657,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 //
 // Only do this if we're using the regular heap arena hints.
 // This behavior is only for the heap.
-v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
+v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
 if v != nil {
 size = n
 goto mapped

@@ -678,7 +678,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 // Outside addressable heap. Can't use.
 v = nil
 } else {
-v = sysReserve(unsafe.Pointer(p), n)
+v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
 }
 if p == uintptr(v) {
 // Success. Update the hint.

@@ -714,7 +714,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 // All of the hints failed, so we'll take any
 // (sufficiently aligned) address the kernel will give
 // us.
-v, size = sysReserveAligned(nil, n, heapArenaBytes)
+v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
 if v == nil {
 return nil, 0
 }

@@ -764,7 +764,7 @@ mapped:
 // is paged in is too expensive. Trying to account for the whole region means
 // that it will appear like an enormous memory overhead in statistics, even though
 // it is not.
-l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
+l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
 if l2 == nil {
 throw("out of memory allocating heap arena map")
 }

@@ -780,7 +780,7 @@ mapped:
 throw("arena already initialized")
 }
 var r *heapArena
-r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
 if r == nil {
 r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
 if r == nil {

@@ -827,7 +827,7 @@ mapped:
 // sysReserveAligned is like sysReserve, but the returned pointer is
 // aligned to align bytes. It may reserve either n or n+align bytes,
 // so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
 if isSbrkPlatform {
 if v != nil {
 throw("unexpected heap arena hint on sbrk platform")

@@ -839,7 +839,7 @@ func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
 // for a larger region and remove the parts we don't need.
 retries := 0
 retry:
-p := uintptr(sysReserve(v, size+align))
+p := uintptr(sysReserve(v, size+align, vmaName))
 switch {
 case p == 0:
 return nil, 0

@@ -852,7 +852,7 @@ retry:
 // so we may have to try again.
 sysFreeOS(unsafe.Pointer(p), size+align)
 p = alignUp(p, align)
-p2 := sysReserve(unsafe.Pointer(p), size)
+p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
 if p != uintptr(p2) {
 // Must have raced. Try again.
 sysFreeOS(p2, size)

@@ -1933,7 +1933,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 }

 if size >= maxBlock {
-return (*notInHeap)(sysAlloc(size, sysStat))
+return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
 }

 mp := acquirem()

@@ -1946,7 +1946,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 }
 persistent.off = alignUp(persistent.off, align)
 if persistent.off+size > persistentChunkSize || persistent.base == nil {
-persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
 if persistent.base == nil {
 if persistent == &globalAlloc.persistentAlloc {
 unlock(&globalAlloc.mutex)

@@ -2020,7 +2020,7 @@ func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
 l.mapMemory = mapMemory
 }

-func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
 p := alignUp(l.next, align)
 if p+size > l.end {
 return nil

@@ -2030,7 +2030,7 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
 if l.mapMemory {
 // Transition from Reserved to Prepared to Ready.
 n := pEnd - l.mapped
-sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
 sysUsed(unsafe.Pointer(l.mapped), n, n)
 }
 l.mapped = pEnd

@@ -46,10 +46,10 @@ import "unsafe"
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
 sysStat.add(int64(n))
 gcController.mappedReady.Add(int64(n))
-return sysAllocOS(n)
+return sysAllocOS(n, vmaName)
 }

 // sysUnused transitions a memory region from Ready to Prepared. It notifies the

@@ -142,15 +142,15 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
 // may use larger alignment, so the caller must be careful to realign the
 // memory obtained by sysReserve.
-func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-return sysReserveOS(v, n)
+func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
+return sysReserveOS(v, n, vmaName)
 }

 // sysMap transitions a memory region from Reserved to Prepared. It ensures the
 // memory region can be efficiently transitioned to Ready.
 //
 // sysStat must be non-nil.
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat, vmaName string) {
 sysStat.add(int64(n))
-sysMapOS(v, n)
+sysMapOS(v, n, vmaName)
 }

@@ -12,7 +12,7 @@ import (
 // prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 if err == _EACCES {

@@ -56,7 +56,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 return nil

@@ -64,7 +64,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 return p
 }

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 // AIX does not allow mapping a range that is already mapped.
 // So, call mprotect to change permissions.
 // Note that sysMap is always called with a non-nil pointer

@@ -14,7 +14,7 @@ import (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 return nil

@@ -57,7 +57,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 // Indicates not to reserve swap space for the mapping.
 const _sunosMAP_NORESERVE = 0x40

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 flags := int32(_MAP_ANON | _MAP_PRIVATE)
 if GOOS == "solaris" || GOOS == "illumos" {
 // Be explicit that we don't want to reserve swap space

@@ -75,7 +75,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 const _sunosEAGAIN = 11
 const _ENOMEM = 12

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {
 throw("runtime: out of memory")

@@ -12,7 +12,7 @@ import (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 return nil

@@ -54,7 +54,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 return nil

@@ -64,7 +64,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {

 const _ENOMEM = 12

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 if err == _ENOMEM {
 throw("runtime: out of memory")

@@ -18,7 +18,7 @@ const (
 // prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, vmaName string) unsafe.Pointer {
 p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 if err == _EACCES {

@@ -31,6 +31,7 @@ func sysAllocOS(n uintptr) unsafe.Pointer {
 }
 return nil
 }
+setVMAName(p, n, vmaName)
 return p
 }

@@ -70,7 +71,10 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 // Fall back on mmap if it's not supported.
 // _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE will unmap all the
 // pages in the old mapping, and remap the memory region.
-mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+if err == 0 && p != nil {
+setVMAName(p, n, "unused")
+}
 }

 if debug.harddecommit > 0 {

@@ -78,6 +82,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 if p != v || err != 0 {
 throw("runtime: cannot disable permissions in address space")
 }
+setVMAName(p, n, "unused")
 }
 }

@@ -90,6 +95,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) {
 if p != v || err != 0 {
 throw("runtime: cannot remap pages in address space")
 }
+setVMAName(p, n, "used")
 return
 }
 }

@@ -154,15 +160,16 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 madvise(v, n, _MADV_DONTNEED)
 }

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
 p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 if err != 0 {
 return nil
 }
+setVMAName(p, n, vmaName)
 return p
 }

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
 p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 if err == _ENOMEM {
 throw("runtime: out of memory")

@@ -171,6 +178,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr) {
 print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
 throw("runtime: cannot map pages in arena address space")
 }
+setVMAName(p, n, vmaName)

 // Disable huge pages if the GODEBUG for it is set.
 //

@@ -157,7 +157,7 @@ func initBloc() {
 blocMax = bloc
 }

-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 lock(&memlock)
 p := memAlloc(n)
 memCheck()

@@ -195,13 +195,13 @@ func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
 func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
 }

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }

 func sysFaultOS(v unsafe.Pointer, n uintptr) {
 }

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 lock(&memlock)
 var p unsafe.Pointer
 if uintptr(v) == bloc {

@@ -25,7 +25,7 @@ const (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }

@@ -117,7 +117,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 sysUnusedOS(v, n)
 }

-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 // v is just a hint.
 // First try at v.
 // This will fail if any of [v, v+n) is already reserved.

@@ -130,5 +130,5 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }

-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }

@@ -548,7 +548,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 }
 var new []*mspan
 sp := (*slice)(unsafe.Pointer(&new))
-sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
+sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
 if sp.array == nil {
 throw("runtime: cannot allocate memory")
 }

@@ -1527,7 +1527,7 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 // Transition this space from Reserved to Prepared and mark it
 // as released since we'll be able to start using it after updating
 // the page allocator and releasing the lock at any time.
-sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased)
+sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap")
 // Update stats.
 stats := memstats.heapStats.acquire()
 atomic.Xaddint64(&stats.released, int64(size))

@@ -1558,7 +1558,7 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 // The allocation is always aligned to the heap arena
 // size which is always > physPageSize, so its safe to
 // just add directly to heapReleased.
-sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased)
+sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap")

 // The memory just allocated counts as both released
 // and idle, even though it's not yet backed by spans.

@@ -2658,7 +2658,7 @@ func newArenaMayUnlock() *gcBitsArena {
 var result *gcBitsArena
 if gcBitsArenas.free == nil {
 unlock(&gcBitsArenas.lock)
-result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
+result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits"))
 if result == nil {
 throw("runtime: cannot allocate memory")
 }

@@ -81,6 +81,8 @@ const (
 // there should this change.
 pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
 pallocChunksL1Shift = pallocChunksL2Bits
+
+vmaNamePageAllocIndex = "page alloc index"
 )

 // maxSearchAddr returns the maximum searchAddr value, which indicates

@@ -401,7 +403,7 @@ func (p *pageAlloc) grow(base, size uintptr) {
 if p.chunks[c.l1()] == nil {
 // Create the necessary l2 entry.
 const l2Size = unsafe.Sizeof(*p.chunks[0])
-r := sysAlloc(l2Size, p.sysStat)
+r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
 if r == nil {
 throw("pageAlloc: out of memory")
 }

@@ -71,12 +71,12 @@ func (p *pageAlloc) sysInit(test bool) {
 totalSize = alignUp(totalSize, physPageSize)

 // Reserve memory for all levels in one go. There shouldn't be much for 32-bit.
-reservation := sysReserve(nil, totalSize)
+reservation := sysReserve(nil, totalSize, "page summary")
 if reservation == nil {
 throw("failed to reserve page summary memory")
 }
 // There isn't much. Just map it and mark it as used immediately.
-sysMap(reservation, totalSize, p.sysStat)
+sysMap(reservation, totalSize, p.sysStat, "page summary")
 sysUsed(reservation, totalSize, totalSize)
 p.summaryMappedReady += totalSize

@@ -123,7 +123,7 @@ func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr) {
 if test {
 // Set up the scavenge index via sysAlloc so the test can free it later.
 scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
-s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
+s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat, vmaNamePageAllocIndex)))[:]
 mappedReady = scavIndexSize
 } else {
 // Set up the scavenge index.

@@ -76,7 +76,7 @@ func (p *pageAlloc) sysInit(test bool) {

 // Reserve b bytes of memory anywhere in the address space.
 b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
-r := sysReserve(nil, b)
+r := sysReserve(nil, b, "page summary")
 if r == nil {
 throw("failed to reserve page summary memory")
 }

@@ -176,7 +176,7 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) {
 }

 // Map and commit need.
-sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
+sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat, "page alloc")
 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
 p.summaryMappedReady += need.size()
 }

@@ -229,7 +229,7 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {

 // If we've got something to map, map it, and update the slice bounds.
 if need.size() != 0 {
-sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat, "scavenge index")
 sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
 // Update the indices only after the new memory is valid.
 if haveMax == 0 || needMin < haveMin {

@@ -248,7 +248,7 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
 func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
 n := uintptr(1<<heapAddrBits) / pallocChunkBytes
 nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
-r := sysReserve(nil, nbytes)
+r := sysReserve(nil, nbytes, "scavenge index")
 sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
 s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
 return 0 // All memory above is mapped Reserved.

@@ -279,7 +279,7 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
 // check again under the lock
 bh = (*buckhashArray)(buckhash.Load())
 if bh == nil {
-bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
+bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys, "profiler hash buckets"))
 if bh == nil {
 throw("runtime: cannot allocate memory")
 }

@@ -233,7 +233,7 @@ func newosproc(mp *m) {
 //
 //go:nosplit
 func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
-stack := sysAlloc(stacksize, &memstats.stacks_sys)
+stack := sysAlloc(stacksize, &memstats.stacks_sys, "OS thread stack")
 if stack == nil {
 writeErrStr(failallocatestack)
 exit(1)

@@ -206,7 +206,7 @@ func newosproc(mp *m) {
 //
 //go:nosplit
 func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
-stack := sysAlloc(stacksize, &memstats.stacks_sys)
+stack := sysAlloc(stacksize, &memstats.stacks_sys, "OS thread stack")
 if stack == nil {
 writeErrStr(failallocatestack)
 exit(1)

src/runtime/set_vma_name_linux.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package runtime
+
+import (
+	"internal/runtime/atomic"
+	"internal/runtime/syscall"
+	"unsafe"
+)
+
+var prSetVMAUnsupported atomic.Bool
+
+// setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name)
+func setVMAName(start unsafe.Pointer, length uintptr, name string) {
+	if unsupported := prSetVMAUnsupported.Load(); unsupported {
+		return
+	}
+
+	var sysName [80]byte
+	n := copy(sysName[:], " Go: ")
+	copy(sysName[n:79], name) // leave final byte zero
+
+	_, _, err := syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_SET_VMA, syscall.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0)
+	if err == _EINVAL {
+		prSetVMAUnsupported.Store(true)
+	}
+	// ignore other errors
+}

src/runtime/set_vma_name_stub.go (new file, 12 lines)
@@ -0,0 +1,12 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package runtime
+
+import "unsafe"
+
+// setVMAName isn’t implemented
+func setVMAName(start unsafe.Pointer, len uintptr, name string) {}

@@ -350,7 +350,7 @@ func stackalloc(n uint32) stack {

 if debug.efence != 0 || stackFromSystem != 0 {
 n = uint32(alignUp(uintptr(n), physPageSize))
-v := sysAlloc(uintptr(n), &memstats.stacks_sys)
+v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
 if v == nil {
 throw("out of memory (stackalloc)")
 }

@@ -165,7 +165,7 @@ func (w traceWriter) refill() traceWriter {
 unlock(&trace.lock)
 } else {
 unlock(&trace.lock)
-w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
+w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys, "trace buffer"))
 if w.traceBuf == nil {
 throw("trace: out of memory")
 }

@@ -77,7 +77,7 @@ func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
 }

 // Allocate a new block.
-block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
+block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys, "trace arena alloc"))
 if block == nil {
 throw("traceRegion: out of memory")
 }

@@ -55,6 +55,7 @@ func vgetrandomGetState() uintptr {
 unlock(&vgetrandomAlloc.statesLock)
 return 0
 }
+setVMAName(p, allocSize, "getrandom states")
 newBlock := uintptr(p)
 if vgetrandomAlloc.states == nil {
 vgetrandomAlloc.states = make([]uintptr, 0, num)