// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/byteorder"
	"internal/runtime/atomic"
	"internal/stringslite"
	"unsafe"
)
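
// mOS holds the Plan 9-specific state kept in each m.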
type mOS struct {
	waitsemacount uint32
	notesig       *int8
	errstr        *byte
	ignoreHangup  bool
}

func dupfd(old, new int32) int32
func closefd(fd int32) int32

//go:noescape
func open(name *byte, mode, perm int32) int32

//go:noescape
func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

//go:noescape
func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

func seek(fd int32, offset int64, whence int32) int64

//go:noescape
func exits(msg *byte)

//go:noescape
func brk_(addr unsafe.Pointer) int32

func sleep(ms int32) int32

func rfork(flags int32) int32

//go:noescape
func plan9_semacquire(addr *uint32, block int32) int32

//go:noescape
func plan9_tsemacquire(addr *uint32, ms int32) int32

//go:noescape
func plan9_semrelease(addr *uint32, count int32) int32

//go:noescape
func notify(fn unsafe.Pointer) int32

func noted(mode int32) int32

//go:noescape
func nsec(*int64) int64

//go:noescape
func sigtramp(ureg, note unsafe.Pointer)

func setfpmasks()

//go:noescape
func tstart_plan9(newm *m)

func errstr() string

type _Plink uintptr
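
// sigpanic is called by the signal handler to turn the note that caused the
// signal into a panic (memory fault, divide error, floating point error) or,
// for notes the runtime cannot recover from, a fatal throw.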
func sigpanic() {
	gp := getg()
	if !canpanic() {
		throw("unexpected signal during runtime execution")
	}

	note := gostringnocopy((*byte)(unsafe.Pointer(gp.m.notesig)))
	switch gp.sig {
	case _SIGRFAULT, _SIGWFAULT:
		i := indexNoFloat(note, "addr=")
		if i >= 0 {
			i += 5
		} else if i = indexNoFloat(note, "va="); i >= 0 {
			i += 3
		} else {
			panicmem()
		}
		addr := note[i:]
		gp.sigcode1 = uintptr(atolwhex(addr))
		if gp.sigcode1 < 0x1000 {
			panicmem()
		}
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		if inUserArenaChunk(gp.sigcode1) {
			// We could check that the arena chunk is explicitly set to fault,
			// but the fact that we faulted on accessing it is enough to prove
			// that it is.
			print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
		} else {
			print("unexpected fault address ", hex(gp.sigcode1), "\n")
		}
		throw("fault")
	case _SIGTRAP:
		if gp.paniconfault {
			panicmem()
		}
		throw(note)
	case _SIGINTDIV:
		panicdivide()
	case _SIGFLOAT:
		panicfloat()
	default:
		panic(errorString(note))
	}
}

// indexNoFloat is bytealg.IndexString but safe to use in a note
// handler.
func indexNoFloat(s, t string) int {
	if len(t) == 0 {
		return 0
	}
	for i := 0; i < len(s); i++ {
		if s[i] == t[0] && stringslite.HasPrefix(s[i:], t) {
			return i
		}
	}
	return -1
}
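
// atolwhex parses an integer from the start of p, skipping leading spaces and
// tabs and accepting an optional sign. A 0x or 0X prefix selects hexadecimal,
// a leading 0 selects octal, and anything else is parsed as decimal.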
func atolwhex(p string) int64 {
	for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
		p = p[1:]
	}
	neg := false
	if stringslite.HasPrefix(p, "-") || stringslite.HasPrefix(p, "+") {
		neg = p[0] == '-'
		p = p[1:]
		for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
			p = p[1:]
		}
	}
	var n int64
	switch {
	case stringslite.HasPrefix(p, "0x"), stringslite.HasPrefix(p, "0X"):
		p = p[2:]
		for ; len(p) > 0; p = p[1:] {
			if '0' <= p[0] && p[0] <= '9' {
				n = n*16 + int64(p[0]-'0')
			} else if 'a' <= p[0] && p[0] <= 'f' {
				n = n*16 + int64(p[0]-'a'+10)
			} else if 'A' <= p[0] && p[0] <= 'F' {
				n = n*16 + int64(p[0]-'A'+10)
			} else {
				break
			}
		}
	case stringslite.HasPrefix(p, "0"):
		for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] {
			n = n*8 + int64(p[0]-'0')
		}
	default:
		for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] {
			n = n*10 + int64(p[0]-'0')
		}
	}
	if neg {
		n = -n
	}
	return n
}

type sigset struct{}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	// Initialize stack and goroutine for note handling.
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
	mp.notesig = (*int8)(mallocgc(_ERRMAX, nil, true))
	// Initialize stack for handling strings from the
	// errstr system call, as used in package syscall.
	mp.errstr = (*byte)(mallocgc(_ERRMAX, nil, true))
}

func sigsave(p *sigset) {
}

func msigrestore(sigmask sigset) {
}

//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
}

func sigblock(exiting bool) {
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	if atomic.Load(&exiting) != 0 {
		exits(&emptystatus[0])
	}
	// Mask all SSE floating-point exceptions
	// when running on the 64-bit kernel.
	setfpmasks()
}

// Called from dropm to undo the effect of an minit.
func unminit() {
}

// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}

var sysstat = []byte("/dev/sysstat\x00")
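
// getCPUCount counts the processors listed in /dev/sysstat, one per line.
// It returns 1 if the file cannot be read or contains no entries.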
func getCPUCount() int32 {
	var buf [2048]byte
	fd := open(&sysstat[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		return 1
	}
	ncpu := int32(0)
	for {
		n := read(fd, unsafe.Pointer(&buf), int32(len(buf)))
		if n <= 0 {
			break
		}
		for i := int32(0); i < n; i++ {
			if buf[i] == '\n' {
				ncpu++
			}
		}
	}
	closefd(fd)
	if ncpu == 0 {
		ncpu = 1
	}
	return ncpu
}

var devswap = []byte("/dev/swap\x00")
var pagesize = []byte(" pagesize\n")
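
// getPageSize parses the "<n> pagesize" line of /dev/swap to determine the
// system page size, falling back to minPhysPageSize if that fails.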
func getPageSize() uintptr {
	var buf [2048]byte
	var pos int
	fd := open(&devswap[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		// There's not much we can do if /dev/swap doesn't
		// exist. However, nothing in the memory manager uses
		// this on Plan 9, so it also doesn't really matter.
		return minPhysPageSize
	}
	for pos < len(buf) {
		n := read(fd, unsafe.Pointer(&buf[pos]), int32(len(buf)-pos))
		if n <= 0 {
			break
		}
		pos += int(n)
	}
	closefd(fd)
	text := buf[:pos]
	// Find "<n> pagesize" line.
	bol := 0
	for i, c := range text {
		if c == '\n' {
			bol = i + 1
		}
		if bytesHasPrefix(text[i:], pagesize) {
			// Parse number at the beginning of this line.
			return uintptr(_atoi(text[bol:]))
		}
	}
	// Again, the page size doesn't really matter, so use a fallback.
	return minPhysPageSize
}
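
// bytesHasPrefix reports whether s begins with prefix.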
func bytesHasPrefix(s, prefix []byte) bool {
	if len(s) < len(prefix) {
		return false
	}
	for i, p := range prefix {
		if s[i] != p {
			return false
		}
	}
	return true
}

var pid = []byte("#c/pid\x00")
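
// getpid returns the pid of the current process, read from #c/pid.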
func getpid() uint64 {
	var b [20]byte
	fd := open(&pid[0], 0, 0)
	if fd >= 0 {
		read(fd, unsafe.Pointer(&b), int32(len(b)))
		closefd(fd)
	}
	c := b[:]
	for c[0] == ' ' || c[0] == '\t' {
		c = c[1:]
	}
	return uint64(_atoi(c))
}

var (
	bintimeFD int32 = -1

	bintimeDev = []byte("/dev/bintime\x00")
	randomDev  = []byte("/dev/random\x00")
)

func osinit() {
	physPageSize = getPageSize()
	initBloc()
	numCPUStartup = getCPUCount()
	getg().m.procid = getpid()

	fd := open(&bintimeDev[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		fatal("cannot open /dev/bintime")
	}
	bintimeFD = fd

	// Move fd high up, to avoid conflicts with smaller ones
	// that programs might hard code, and to make exec's job easier.
	// Plan 9 allocates chunks of DELTAFD=20 fds in a row,
	// so 18 is near the top of what's possible.
	if bintimeFD < 18 {
		if dupfd(bintimeFD, 18) < 0 {
			fatal("cannot dup /dev/bintime onto 18")
		}
		closefd(bintimeFD)
		bintimeFD = 18
	}
}

//go:nosplit
func crash() {
	notify(nil)
	*(*int)(nil) = 0
}

// Don't read from /dev/random, since this device can only
// return a few hundred bits a second and would slow creation
// of Go processes down significantly.
//
//go:nosplit
func readRandom(r []byte) int {
	return 0
}

func initsig(preinit bool) {
	if !preinit {
		notify(unsafe.Pointer(abi.FuncPCABI0(sigtramp)))
	}
}

//go:nosplit
func osyield() {
	sleep(0)
}

//go:nosplit
func osyield_no_g() {
	osyield()
}

//go:nosplit
func usleep(µs uint32) {
	ms := int32(µs / 1000)
	if ms == 0 {
		ms = 1
	}
	sleep(ms)
}

//go:nosplit
func usleep_no_g(usec uint32) {
	usleep(usec)
}

var goexits = []byte("go: exit ")
var emptystatus = []byte("\x00")
var exiting uint32
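
// goexitsall posts a "go: exit <status>" note to every other M's process so
// that the whole program exits together.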
func goexitsall(status *byte) {
	var buf [_ERRMAX]byte
	if !atomic.Cas(&exiting, 0, 1) {
		return
	}
	getg().m.locks++
	n := copy(buf[:], goexits)
	n = copy(buf[n:], gostringnocopy(status))
	pid := getpid()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		if mp.procid != 0 && mp.procid != pid {
			postnote(mp.procid, buf[:])
		}
	}
	getg().m.locks--
}

var procdir = []byte("/proc/")
var notefile = []byte("/note\x00")
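
// postnote writes msg to /proc/<pid>/note, posting a note to the process with
// the given pid. It returns 0 on success and -1 on failure.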
func postnote(pid uint64, msg []byte) int {
	var buf [128]byte
	var tmp [32]byte
	n := copy(buf[:], procdir)
	n += copy(buf[n:], itoa(tmp[:], pid))
	copy(buf[n:], notefile)
	fd := open(&buf[0], _OWRITE, 0)
	if fd < 0 {
		return -1
	}
	len := findnull(&msg[0])
	if write1(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int32(len) {
		closefd(fd)
		return -1
	}
	closefd(fd)
	return 0
}

//go:nosplit
func exit(e int32) {
	var status []byte
	if e == 0 {
		status = emptystatus
	} else {
		// build error string
		var tmp [32]byte
		sl := itoa(tmp[:len(tmp)-1], uint64(e))
		// Don't append, rely on the existing data being zero.
		status = sl[:len(sl)+1]
	}
	goexitsall(&status[0])
	exits(&status[0])
}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	if false {
		print("newosproc mp=", mp, " ostk=", &mp, "\n")
	}
	pid := rfork(_RFPROC | _RFMEM | _RFNOWAIT)
	if pid < 0 {
		throw("newosproc: rfork failed")
	}
	if pid == 0 {
		tstart_plan9(mp)
	}
}

func exitThread(wait *atomic.Uint32) {
	// We should never reach exitThread on Plan 9 because we let
	// the OS clean up threads.
	throw("exitThread")
}

//go:nosplit
func semacreate(mp *m) {
}
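
// semasleep waits for a wakeup on this M's semaphore for at most ns
// nanoseconds (or indefinitely if ns is negative). It returns 0 if woken
// and -1 on timeout.
//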
//go:nosplit
func semasleep(ns int64) int {
	gp := getg()
	if ns >= 0 {
		ms := int32(ns / 1000000)
		if ms == 0 {
			ms = 1
		}
		ret := plan9_tsemacquire(&gp.m.waitsemacount, ms)
		if ret == 1 {
			return 0 // success
		}
		return -1 // timeout or interrupted
	}
	for plan9_semacquire(&gp.m.waitsemacount, 1) < 0 {
		// interrupted; try again (c.f. lock_sema.go)
	}
	return 0 // success
}

//go:nosplit
func semawakeup(mp *m) {
	plan9_semrelease(&mp.waitsemacount, 1)
}

//go:nosplit
func read(fd int32, buf unsafe.Pointer, n int32) int32 {
	return pread(fd, buf, n, -1)
}

//go:nosplit
func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
	return pwrite(int32(fd), buf, n, -1)
}

var _badsignal = []byte("runtime: signal received on thread not created by Go.\n")

// This runs on a foreign stack, without an m or a g. No stack split.
//
//go:nosplit
func badsignal2() {
	pwrite(2, unsafe.Pointer(&_badsignal[0]), int32(len(_badsignal)), -1)
	exits(&_badsignal[0])
}

func raisebadsignal(sig uint32) {
	badsignal2()
}
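
// _atoi parses an unsigned decimal integer from the start of b.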
func _atoi(b []byte) int {
	n := 0
	for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
		n = n*10 + int(b[0]) - '0'
		b = b[1:]
	}
	return n
}

func signame(sig uint32) string {
	if sig >= uint32(len(sigtable)) {
		return ""
	}
	return sigtable[sig].name
}

const preemptMSupported = false

func preemptM(mp *m) {
	// Not currently supported.
	//
	// TODO: Use a note like we use signals on POSIX OSes
}
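
// readtime reads up to n big-endian uint64 values from /dev/bintime into t,
// requiring at least min of them, and returns the number of values read.
//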
//go:nosplit
func readtime(t *uint64, min, n int) int {
	if bintimeFD < 0 {
		fatal("/dev/bintime not opened")
	}
	const uint64size = 8
	r := pread(bintimeFD, unsafe.Pointer(t), int32(n*uint64size), 0)
	if int(r) < min*uint64size {
		fatal("cannot read /dev/bintime")
	}
	return int(r) / uint64size
}

// timesplit returns u/1e9, u%1e9
func timesplit(u uint64) (sec int64, nsec int32)
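
// frombe converts a big-endian value, as read from /dev/bintime, to host
// byte order.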
func frombe(u uint64) uint64 {
	b := (*[8]byte)(unsafe.Pointer(&u))
	return byteorder.BEUint64(b[:])
}

//go:nosplit
func nanotime1() int64 {
	var t [4]uint64
	if readtime(&t[0], 1, 4) == 4 {
		// long read indicates new kernel sending monotonic time
		// (https://github.com/rsc/plan9/commit/baf076425).
		return int64(frombe(t[3]))
	}
	// fall back to unix time
	return int64(frombe(t[0]))
}

//go:nosplit
func walltime() (sec int64, nsec int32) {
	var t [1]uint64
	readtime(&t[0], 1, 1)
	return timesplit(frombe(t[0]))
}