runtime: implement xadduintptr and update system mstats using it
The motivation is that sysAlloc/Free() currently aren't safe to be called
without a valid G, because arm's xadd64() uses locks that require a valid G.

The solution here was proposed by Dmitry Vyukov: use xadduintptr() instead
of xadd64(), until arm can support xadd64 on all of its architectures (not
a trivial task for arm).

Change-Id: I250252079357ea2e4360e1235958b1c22051498f
Reviewed-on: https://go-review.googlesource.com/9002
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
parent 8566979972, commit 6ad33be2d9
18 changed files with 189 additions and 39 deletions
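As background for the change below: on a little-endian machine the low-order
word of a uint64 stat and a uintptr stored at the same address coincide, so an
atomic uintptr add updates the 64-bit stat correctly as long as the stat never
exceeds the uintptr range. A minimal standalone sketch of that idea
(sync/atomic.AddUintptr stands in for the runtime-internal xadduintptr; this
is not runtime code):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// stat plays the role of a 64-bit system memory stat. On little-endian
// machines its low-order word overlaps a uintptr at the same address, so
// an atomic uintptr add is visible through the uint64 view while the value
// stays in uintptr range. (On a big-endian 32-bit machine the add would
// land in the high-order word, which is why the runtime keeps using
// xadd64 there.)
var stat uint64

func main() {
	atomic.AddUintptr((*uintptr)(unsafe.Pointer(&stat)), 100)
	fmt.Println(stat) // 100 on little-endian platforms
}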
@@ -530,6 +530,9 @@ TEXT runtime·xadd64(SB), NOSPLIT, $0-24
 	MOVQ	AX, ret+16(FP)
 	RET
 
+TEXT runtime·xadduintptr(SB), NOSPLIT, $0-24
+	JMP	runtime·xadd64(SB)
+
 TEXT runtime·xchg(SB), NOSPLIT, $0-20
 	MOVQ	ptr+0(FP), BX
 	MOVL	new+8(FP), AX
@@ -483,6 +483,9 @@ TEXT runtime·xadd64(SB), NOSPLIT, $0-24
 	MOVQ	AX, ret+16(FP)
 	RET
 
+TEXT runtime·xadduintptr(SB), NOSPLIT, $0-12
+	JMP	runtime·xadd(SB)
+
 TEXT runtime·xchg(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), BX
 	MOVL	new+4(FP), AX
@@ -32,6 +32,10 @@ func xadd64(ptr *uint64, delta int64) uint64 {
 	}
 }
 
+//go:noescape
+//go:linkname xadduintptr runtime.xadd
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
 //go:nosplit
 func xchg64(ptr *uint64, new uint64) uint64 {
 	for {
@@ -36,6 +36,9 @@ func xadd(ptr *uint32, delta int32) uint32
 //go:noescape
 func xadd64(ptr *uint64, delta int64) uint64
 
+//go:noescape
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
 //go:noescape
 func xchg(ptr *uint32, new uint32) uint32
 
@@ -27,6 +27,10 @@ func xadd(val *uint32, delta int32) uint32 {
 	}
 }
 
+//go:noescape
+//go:linkname xadduintptr runtime.xadd
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
 //go:nosplit
 func xchg(addr *uint32, v uint32) uint32 {
 	for {
@@ -12,6 +12,10 @@ func xadd(ptr *uint32, delta int32) uint32
 //go:noescape
 func xadd64(ptr *uint64, delta int64) uint64
 
+//go:noescape
+//go:linkname xadduintptr runtime.xadd64
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
 //go:noescape
 func xchg(ptr *uint32, new uint32) uint32
 
@@ -14,6 +14,10 @@ func xadd(ptr *uint32, delta int32) uint32
 //go:noescape
 func xadd64(ptr *uint64, delta int64) uint64
 
+//go:noescape
+//go:linkname xadduintptr runtime.xadd64
+func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
 //go:noescape
 func xchg(ptr *uint32, new uint32) uint32
 
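The //go:linkname aliases above are sound because xadduintptr operates on a
type whose size matches the aliased function's operand: uintptr is 8 bytes on
the 64-bit ports (which alias xadd64) and 4 bytes on the 32-bit ports (which
alias xadd). A trivial standalone check of that size relationship:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Prints "8 8" on 64-bit ports and "4 8" on 32-bit ones; the linkname
	// target for xadduintptr is chosen to match the first number.
	fmt.Println(unsafe.Sizeof(uintptr(0)), unsafe.Sizeof(uint64(0)))
}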
src/runtime/atomic_test.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+	"runtime"
+	"testing"
+	"unsafe"
+)
+
+func runParallel(N, iter int, f func()) {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(int(N)))
+	done := make(chan bool)
+	for i := 0; i < N; i++ {
+		go func() {
+			for j := 0; j < iter; j++ {
+				f()
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < N; i++ {
+		<-done
+	}
+}
+
+func TestXadduintptr(t *testing.T) {
+	const N = 20
+	const iter = 100000
+	inc := uintptr(100)
+	total := uintptr(0)
+	runParallel(N, iter, func() {
+		runtime.Xadduintptr(&total, inc)
+	})
+	if want := uintptr(N * iter * inc); want != total {
+		t.Fatalf("xadduintptr error, want %d, got %d", want, total)
+	}
+	total = 0
+	runParallel(N, iter, func() {
+		runtime.Xadduintptr(&total, inc)
+		runtime.Xadduintptr(&total, uintptr(-int64(inc)))
+	})
+	if total != 0 {
+		t.Fatalf("xadduintptr total error, want %d, got %d", 0, total)
+	}
+}
+
+// Tests that xadduintptr correctly updates 64-bit values. The place where
+// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
+func TestXadduintptrOnUint64(t *testing.T) {
+	if runtime.BigEndian != 0 {
+		// On big endian architectures, we never use xadduintptr to update
+		// 64-bit values and hence we skip the test. (Note that functions
+		// mSysStat{Inc,Dec} in mstats.go have explicit checks for
+		// big-endianness.)
+		return
+	}
+	const inc = 100
+	val := uint64(0)
+	runtime.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc)
+	if inc != val {
+		t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val)
+	}
+}
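The defer in runParallel above uses a common save/restore idiom:
runtime.GOMAXPROCS returns the previous setting, so the inner call applies the
new value and the deferred outer call restores the old one. A standalone
sketch:

package main

import (
	"fmt"
	"runtime"
)

func demo() {
	// The inner call sets GOMAXPROCS to 8 and returns the previous value;
	// the deferred outer call restores that value when demo returns.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
	fmt.Println(runtime.GOMAXPROCS(0)) // a zero argument queries without changing
}

func main() {
	demo()
}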
@@ -21,6 +21,7 @@ var F64toint = f64toint
 var Entersyscall = entersyscall
 var Exitsyscall = exitsyscall
 var LockedOSThread = lockedOSThread
+var Xadduintptr = xadduintptr
 
 var FuncPC = funcPC
@@ -129,3 +130,5 @@ var Write = write
 
 func Envs() []string     { return envs }
 func SetEnvs(e []string) { envs = e }
+
+var BigEndian = _BigEndian
@@ -790,7 +790,7 @@ var globalAlloc struct {
 // There is no associated free operation.
 // Intended for things like function/type/debug-related persistent data.
 // If align is 0, uses default align (currently 8).
-func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
+func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
 	const (
 		chunk    = 256 << 10
 		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
@@ -811,7 +811,7 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 	}
 
 	if size >= maxBlock {
-		return sysAlloc(size, stat)
+		return sysAlloc(size, sysStat)
 	}
 
 	mp := acquirem()
@@ -840,9 +840,9 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 		unlock(&globalAlloc.mutex)
 	}
 
-	if stat != &memstats.other_sys {
-		xadd64(stat, int64(size))
-		xadd64(&memstats.other_sys, -int64(size))
+	if sysStat != &memstats.other_sys {
+		mSysStatInc(sysStat, size)
+		mSysStatDec(&memstats.other_sys, size)
 	}
 	return p
 }
@@ -8,13 +8,15 @@ package runtime
 
 import "unsafe"
 
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 	v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
 	if uintptr(v) < 4096 {
 		return nil
 	}
-	xadd64(stat, int64(n))
+	mSysStatInc(sysStat, n)
 	return v
 }
 
@@ -25,8 +27,11 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
-	xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+	mSysStatDec(sysStat, n)
 	munmap(v, n)
 }
 
@@ -51,10 +56,10 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
 	return p
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
 	const _ENOMEM = 12
 
-	xadd64(stat, int64(n))
+	mSysStatInc(sysStat, n)
 
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if !reserved {
@@ -6,13 +6,15 @@ package runtime
 
 import "unsafe"
 
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 	v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
 	if uintptr(v) < 4096 {
 		return nil
 	}
-	xadd64(stat, int64(n))
+	mSysStatInc(sysStat, n)
 	return v
 }
 
@@ -24,8 +26,11 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
-	xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+	mSysStatDec(sysStat, n)
 	munmap(v, n)
 }
 
@@ -46,8 +51,8 @@ const (
 	_ENOMEM = 12
 )
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
-	xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+	mSysStatInc(sysStat, n)
 	p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
 	if uintptr(p) == _ENOMEM {
 		throw("runtime: out of memory")
@@ -48,8 +48,10 @@ func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32
 	return p
 }
 
+// Don't split the stack as this method may be invoked without a valid G, which
+// prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 	p := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if uintptr(p) < 4096 {
 		if uintptr(p) == _EACCES {
@@ -62,7 +64,7 @@ func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
 		}
 		return nil
 	}
-	xadd64(stat, int64(n))
+	mSysStatInc(sysStat, n)
 	return p
 }
 
|
@ -93,8 +95,11 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
|
// Don't split the stack as this function may be invoked without a valid G,
|
||||||
xadd64(stat, -int64(n))
|
// which prevents us from allocating more stack.
|
||||||
|
//go:nosplit
|
||||||
|
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
|
||||||
|
mSysStatDec(sysStat, n)
|
||||||
munmap(v, n)
|
munmap(v, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -128,8 +133,8 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
 	return p
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
-	xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+	mSysStatInc(sysStat, n)
 
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
 	if !reserved {
@@ -130,19 +130,19 @@ func sbrk(n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(bl)
 }
 
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
 	lock(&memlock)
 	p := memAlloc(n)
 	memCheck()
 	unlock(&memlock)
 	if p != nil {
-		xadd64(stat, int64(n))
+		mSysStatInc(sysStat, n)
 	}
 	return p
 }
 
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
-	xadd64(stat, -int64(n))
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+	mSysStatDec(sysStat, n)
 	lock(&memlock)
 	memFree(v, n)
 	memCheck()
@@ -155,10 +155,10 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.
-	xadd64(stat, int64(n))
+	mSysStatInc(sysStat, n)
 }
 
 func sysFault(v unsafe.Pointer, n uintptr) {
@@ -18,9 +18,11 @@ const (
 	_PAGE_NOACCESS = 0x0001
 )
 
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
 //go:nosplit
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
-	xadd64(stat, int64(n))
+func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+	mSysStatInc(sysStat, n)
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }
 
@@ -74,8 +76,11 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	}
 }
 
-func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
-	xadd64(stat, -int64(n))
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+	mSysStatDec(sysStat, n)
 	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
 	if r == 0 {
 		throw("runtime: failed to release pages")
@@ -100,8 +105,8 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
-	xadd64(stat, int64(n))
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+	mSysStatInc(sysStat, n)
 	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if p != uintptr(v) {
 		throw("runtime: cannot map pages in arena address space")
@@ -353,3 +353,41 @@ func purgecachedstats(c *mcache) {
 		c.local_nsmallfree[i] = 0
 	}
 }
+
+// Atomically increases a given *system* memory stat. We are counting on this
+// stat never overflowing a uintptr, so this function must only be used for
+// system memory stats.
+//
+// The current implementation for little endian architectures is based on
+// xadduintptr(), which is less than ideal: xadd64() should really be used.
+// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
+// doesn't use locks. (Locks are a problem as they require a valid G, which
+// restricts their usability.)
+//
+// A side-effect of using xadduintptr() is that we need to check for
+// overflow errors.
+//go:nosplit
+func mSysStatInc(sysStat *uint64, n uintptr) {
+	if _BigEndian != 0 {
+		xadd64(sysStat, int64(n))
+		return
+	}
+	if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
+		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
+		exit(2)
+	}
+}
+
+// Atomically decreases a given *system* memory stat. Same comments as
+// mSysStatInc apply.
+//go:nosplit
+func mSysStatDec(sysStat *uint64, n uintptr) {
+	if _BigEndian != 0 {
+		xadd64(sysStat, -int64(n))
+		return
+	}
+	if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
+		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
+		exit(2)
+	}
+}
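The overflow checks above lean on the return value of the atomic add:
xadduintptr returns the new value, so after adding n a result smaller than n
means the counter wrapped. (For the decrement, val+n reconstructs the old
value, so val+n < n means the stat would have gone negative.) A standalone
sketch of the increment check, using sync/atomic in place of the
runtime-internal xadduintptr:

package main

import (
	"fmt"
	"sync/atomic"
)

// addChecked mirrors mSysStatInc's wraparound detection: AddUintptr returns
// the new value, and a result smaller than the delta means the counter
// overflowed and wrapped.
func addChecked(stat *uintptr, n uintptr) error {
	if val := atomic.AddUintptr(stat, n); val < n {
		return fmt.Errorf("stat overflow: val %d, n %d", val, n)
	}
	return nil
}

func main() {
	stat := ^uintptr(0) - 10            // near the top of the uintptr range
	fmt.Println(addChecked(&stat, 5))   // <nil>: no wrap
	fmt.Println(addChecked(&stat, 100)) // wraps, so an overflow error
}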
@@ -98,8 +98,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 //
 //go:nosplit
 func newosproc0(stacksize uintptr, fn unsafe.Pointer, fnarg uintptr) {
-	var dummy uint64
-	stack := sysAlloc(stacksize, &dummy)
+	stack := sysAlloc(stacksize, &memstats.stacks_sys)
 	if stack == nil {
 		write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
 		exit(1)
@@ -146,8 +146,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 // Version of newosproc that doesn't require a valid G.
 //go:nosplit
 func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
-	var dummy uint64
-	stack := sysAlloc(stacksize, &dummy)
+	stack := sysAlloc(stacksize, &memstats.stacks_sys)
 	if stack == nil {
 		write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
 		exit(1)