mirror of https://github.com/golang/go.git
In CL 656755, the readRandom function was modified to read an integer from /dev/random. However, on Plan 9, /dev/random can only return a few hundred bits a second. Since readRandom is called by randinit, which runs whenever a Go process is created, the change made Go programs very slow on Plan 9.

This change reverts the change made in CL 656755 so that readRandom always returns 0 on Plan 9.

Change-Id: Ibe1bf7e4c8cbc82998e4f5e1331f5e29a047c4fc
Cq-Include-Trybots: luci.golang.try:gotip-plan9-arm
Reviewed-on: https://go-review.googlesource.com/c/go/+/663195
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
Reviewed-by: Richard Miller <millerresearch@gmail.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
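For context, readRandom only seeds the runtime's internal random state during randinit; application code never calls it. A minimal, illustrative sketch (not part of this change, assuming nothing beyond the standard library) of how user code obtains secure random bytes portably, via crypto/rand, which maintains its own entropy source:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	// crypto/rand.Read is the supported API for cryptographically
	// secure random bytes; it does not go through runtime.readRandom.
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("random bytes: %x\n", buf)
}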
596 lines
12 KiB
Go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/byteorder"
	"internal/runtime/atomic"
	"internal/stringslite"
	"unsafe"
)

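// mOS contains the Plan 9-specific fields embedded in each m.
// waitsemacount is the counter used by semasleep and semawakeup,
// notesig and errstr are per-M buffers for note strings and for
// results of the errstr system call, and ignoreHangup reports
// whether hangup notes should be ignored.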
type mOS struct {
	waitsemacount uint32
	notesig       *int8
	errstr        *byte
	ignoreHangup  bool
}

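// The declarations below have no Go body; they are low-level system
// call wrappers whose implementations live outside this file.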
func dupfd(old, new int32) int32
func closefd(fd int32) int32

//go:noescape
func open(name *byte, mode, perm int32) int32

//go:noescape
func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

//go:noescape
func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32

func seek(fd int32, offset int64, whence int32) int64

//go:noescape
func exits(msg *byte)

//go:noescape
func brk_(addr unsafe.Pointer) int32

func sleep(ms int32) int32

func rfork(flags int32) int32

//go:noescape
func plan9_semacquire(addr *uint32, block int32) int32

//go:noescape
func plan9_tsemacquire(addr *uint32, ms int32) int32

//go:noescape
func plan9_semrelease(addr *uint32, count int32) int32

//go:noescape
func notify(fn unsafe.Pointer) int32

func noted(mode int32) int32

//go:noescape
func nsec(*int64) int64

//go:noescape
func sigtramp(ureg, note unsafe.Pointer)

func setfpmasks()

//go:noescape
func tstart_plan9(newm *m)

func errstr() string

type _Plink uintptr

func sigpanic() {
	gp := getg()
	if !canpanic() {
		throw("unexpected signal during runtime execution")
	}

	note := gostringnocopy((*byte)(unsafe.Pointer(gp.m.notesig)))
	switch gp.sig {
	case _SIGRFAULT, _SIGWFAULT:
		i := indexNoFloat(note, "addr=")
		if i >= 0 {
			i += 5
		} else if i = indexNoFloat(note, "va="); i >= 0 {
			i += 3
		} else {
			panicmem()
		}
		addr := note[i:]
		gp.sigcode1 = uintptr(atolwhex(addr))
		if gp.sigcode1 < 0x1000 {
			panicmem()
		}
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		if inUserArenaChunk(gp.sigcode1) {
			// We could check that the arena chunk is explicitly set to fault,
			// but the fact that we faulted on accessing it is enough to prove
			// that it is.
			print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
		} else {
			print("unexpected fault address ", hex(gp.sigcode1), "\n")
		}
		throw("fault")
	case _SIGTRAP:
		if gp.paniconfault {
			panicmem()
		}
		throw(note)
	case _SIGINTDIV:
		panicdivide()
	case _SIGFLOAT:
		panicfloat()
	default:
		panic(errorString(note))
	}
}

// indexNoFloat is bytealg.IndexString but safe to use in a note
// handler.
func indexNoFloat(s, t string) int {
	if len(t) == 0 {
		return 0
	}
	for i := 0; i < len(s); i++ {
		if s[i] == t[0] && stringslite.HasPrefix(s[i:], t) {
			return i
		}
	}
	return -1
}

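// atolwhex parses an integer from p after skipping leading spaces,
// tabs, and an optional sign. A 0x or 0X prefix selects hexadecimal,
// a leading 0 selects octal, and anything else is parsed as decimal;
// parsing stops at the first invalid digit.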
func atolwhex(p string) int64 {
	for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
		p = p[1:]
	}
	neg := false
	if stringslite.HasPrefix(p, "-") || stringslite.HasPrefix(p, "+") {
		neg = p[0] == '-'
		p = p[1:]
		for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
			p = p[1:]
		}
	}
	var n int64
	switch {
	case stringslite.HasPrefix(p, "0x"), stringslite.HasPrefix(p, "0X"):
		p = p[2:]
		for ; len(p) > 0; p = p[1:] {
			if '0' <= p[0] && p[0] <= '9' {
				n = n*16 + int64(p[0]-'0')
			} else if 'a' <= p[0] && p[0] <= 'f' {
				n = n*16 + int64(p[0]-'a'+10)
			} else if 'A' <= p[0] && p[0] <= 'F' {
				n = n*16 + int64(p[0]-'A'+10)
			} else {
				break
			}
		}
	case stringslite.HasPrefix(p, "0"):
		for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] {
			n = n*8 + int64(p[0]-'0')
		}
	default:
		for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] {
			n = n*10 + int64(p[0]-'0')
		}
	}
	if neg {
		n = -n
	}
	return n
}

type sigset struct{}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	// Initialize stack and goroutine for note handling.
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
	mp.notesig = (*int8)(mallocgc(_ERRMAX, nil, true))
	// Initialize stack for handling strings from the
	// errstr system call, as used in package syscall.
	mp.errstr = (*byte)(mallocgc(_ERRMAX, nil, true))
}

func sigsave(p *sigset) {
}

func msigrestore(sigmask sigset) {
}

//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
}

func sigblock(exiting bool) {
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	if atomic.Load(&exiting) != 0 {
		exits(&emptystatus[0])
	}
	// Mask all SSE floating-point exceptions
	// when running on the 64-bit kernel.
	setfpmasks()
}

// Called from dropm to undo the effect of an minit.
func unminit() {
}

// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}

var sysstat = []byte("/dev/sysstat\x00")

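// getproccount returns the number of processors by counting the
// lines of /dev/sysstat (one line per CPU). It returns 1 if the
// file cannot be opened or contains no lines.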
func getproccount() int32 {
	var buf [2048]byte
	fd := open(&sysstat[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		return 1
	}
	ncpu := int32(0)
	for {
		n := read(fd, unsafe.Pointer(&buf), int32(len(buf)))
		if n <= 0 {
			break
		}
		for i := int32(0); i < n; i++ {
			if buf[i] == '\n' {
				ncpu++
			}
		}
	}
	closefd(fd)
	if ncpu == 0 {
		ncpu = 1
	}
	return ncpu
}

var devswap = []byte("/dev/swap\x00")
var pagesize = []byte(" pagesize\n")

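// getPageSize reads /dev/swap and parses the number at the start of
// its "pagesize" line. If /dev/swap cannot be read, it falls back to
// minPhysPageSize.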
func getPageSize() uintptr {
	var buf [2048]byte
	var pos int
	fd := open(&devswap[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		// There's not much we can do if /dev/swap doesn't
		// exist. However, nothing in the memory manager uses
		// this on Plan 9, so it also doesn't really matter.
		return minPhysPageSize
	}
	for pos < len(buf) {
		n := read(fd, unsafe.Pointer(&buf[pos]), int32(len(buf)-pos))
		if n <= 0 {
			break
		}
		pos += int(n)
	}
	closefd(fd)
	text := buf[:pos]
	// Find "<n> pagesize" line.
	bol := 0
	for i, c := range text {
		if c == '\n' {
			bol = i + 1
		}
		if bytesHasPrefix(text[i:], pagesize) {
			// Parse number at the beginning of this line.
			return uintptr(_atoi(text[bol:]))
		}
	}
	// Again, the page size doesn't really matter, so use a fallback.
	return minPhysPageSize
}

func bytesHasPrefix(s, prefix []byte) bool {
	if len(s) < len(prefix) {
		return false
	}
	for i, p := range prefix {
		if s[i] != p {
			return false
		}
	}
	return true
}

var pid = []byte("#c/pid\x00")

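// getpid returns the id of the current process, read from #c/pid.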
func getpid() uint64 {
	var b [20]byte
	fd := open(&pid[0], 0, 0)
	if fd >= 0 {
		read(fd, unsafe.Pointer(&b), int32(len(b)))
		closefd(fd)
	}
	c := b[:]
	for c[0] == ' ' || c[0] == '\t' {
		c = c[1:]
	}
	return uint64(_atoi(c))
}

var (
	bintimeFD int32 = -1

	bintimeDev = []byte("/dev/bintime\x00")
	randomDev  = []byte("/dev/random\x00")
)

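// osinit runs once at startup, before the scheduler is up. It
// determines the physical page size and CPU count, initializes the
// brk-based allocator, records the pid of the bootstrap process, and
// opens /dev/bintime for the time functions below.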
func osinit() {
	physPageSize = getPageSize()
	initBloc()
	ncpu = getproccount()
	getg().m.procid = getpid()

	fd := open(&bintimeDev[0], _OREAD|_OCEXEC, 0)
	if fd < 0 {
		fatal("cannot open /dev/bintime")
	}
	bintimeFD = fd

	// Move fd high up, to avoid conflicts with smaller ones
	// that programs might hard code, and to make exec's job easier.
	// Plan 9 allocates chunks of DELTAFD=20 fds in a row,
	// so 18 is near the top of what's possible.
	if bintimeFD < 18 {
		if dupfd(bintimeFD, 18) < 0 {
			fatal("cannot dup /dev/bintime onto 18")
		}
		closefd(bintimeFD)
		bintimeFD = 18
	}
}

//go:nosplit
func crash() {
	notify(nil)
	*(*int)(nil) = 0
}

// Don't read from /dev/random, since this device can only
// return a few hundred bits a second and would slow creation
// of Go processes down significantly.
//
//go:nosplit
func readRandom(r []byte) int {
	return 0
}

func initsig(preinit bool) {
	if !preinit {
		notify(unsafe.Pointer(abi.FuncPCABI0(sigtramp)))
	}
}

//go:nosplit
func osyield() {
	sleep(0)
}

//go:nosplit
func osyield_no_g() {
	osyield()
}

//go:nosplit
func usleep(µs uint32) {
	ms := int32(µs / 1000)
	if ms == 0 {
		ms = 1
	}
	sleep(ms)
}

//go:nosplit
func usleep_no_g(usec uint32) {
	usleep(usec)
}

var goexits = []byte("go: exit ")
var emptystatus = []byte("\x00")
var exiting uint32

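// goexitsall posts an exit note ("go: exit <status>") to every other
// process backing an m so that the whole Go program exits. It is a
// no-op if another process has already started the exit.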
func goexitsall(status *byte) {
	var buf [_ERRMAX]byte
	if !atomic.Cas(&exiting, 0, 1) {
		return
	}
	getg().m.locks++
	n := copy(buf[:], goexits)
	n = copy(buf[n:], gostringnocopy(status))
	pid := getpid()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		if mp.procid != 0 && mp.procid != pid {
			postnote(mp.procid, buf[:])
		}
	}
	getg().m.locks--
}

var procdir = []byte("/proc/")
var notefile = []byte("/note\x00")

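// postnote writes the NUL-terminated note msg to /proc/<pid>/note.
// It returns 0 on success and -1 if the note file cannot be opened
// or written.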
func postnote(pid uint64, msg []byte) int {
	var buf [128]byte
	var tmp [32]byte
	n := copy(buf[:], procdir)
	n += copy(buf[n:], itoa(tmp[:], pid))
	copy(buf[n:], notefile)
	fd := open(&buf[0], _OWRITE, 0)
	if fd < 0 {
		return -1
	}
	len := findnull(&msg[0])
	if write1(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int32(len) {
		closefd(fd)
		return -1
	}
	closefd(fd)
	return 0
}

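// exit terminates the program with status e: it notifies the other
// Go processes via goexitsall and then calls exits with the status
// string (the empty status for e == 0).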
//go:nosplit
|
|
func exit(e int32) {
|
|
var status []byte
|
|
if e == 0 {
|
|
status = emptystatus
|
|
} else {
|
|
// build error string
|
|
var tmp [32]byte
|
|
sl := itoa(tmp[:len(tmp)-1], uint64(e))
|
|
// Don't append, rely on the existing data being zero.
|
|
status = sl[:len(sl)+1]
|
|
}
|
|
goexitsall(&status[0])
|
|
exits(&status[0])
|
|
}
|
|
|
|
// May run with m.p==nil, so write barriers are not allowed.
|
|
//
|
|
//go:nowritebarrier
|
|
func newosproc(mp *m) {
|
|
if false {
|
|
print("newosproc mp=", mp, " ostk=", &mp, "\n")
|
|
}
|
|
pid := rfork(_RFPROC | _RFMEM | _RFNOWAIT)
|
|
if pid < 0 {
|
|
throw("newosproc: rfork failed")
|
|
}
|
|
if pid == 0 {
|
|
tstart_plan9(mp)
|
|
}
|
|
}
|
|
|
|
func exitThread(wait *atomic.Uint32) {
|
|
// We should never reach exitThread on Plan 9 because we let
|
|
// the OS clean up threads.
|
|
throw("exitThread")
|
|
}
|
|
|
|
//go:nosplit
|
|
func semacreate(mp *m) {
|
|
}
|
|
|
|
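// semasleep waits for a wakeup on the current m's semaphore for at
// most ns nanoseconds; ns < 0 means wait indefinitely. It returns 0
// on wakeup and -1 on timeout or interruption.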
//go:nosplit
|
|
func semasleep(ns int64) int {
|
|
gp := getg()
|
|
if ns >= 0 {
|
|
ms := timediv(ns, 1000000, nil)
|
|
if ms == 0 {
|
|
ms = 1
|
|
}
|
|
ret := plan9_tsemacquire(&gp.m.waitsemacount, ms)
|
|
if ret == 1 {
|
|
return 0 // success
|
|
}
|
|
return -1 // timeout or interrupted
|
|
}
|
|
for plan9_semacquire(&gp.m.waitsemacount, 1) < 0 {
|
|
// interrupted; try again (c.f. lock_sema.go)
|
|
}
|
|
return 0 // success
|
|
}
|
|
|
|
//go:nosplit
|
|
func semawakeup(mp *m) {
|
|
plan9_semrelease(&mp.waitsemacount, 1)
|
|
}
|
|
|
|
//go:nosplit
|
|
func read(fd int32, buf unsafe.Pointer, n int32) int32 {
|
|
return pread(fd, buf, n, -1)
|
|
}
|
|
|
|
//go:nosplit
|
|
func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
|
|
return pwrite(int32(fd), buf, n, -1)
|
|
}
|
|
|
|
var _badsignal = []byte("runtime: signal received on thread not created by Go.\n")
|
|
|
|
// This runs on a foreign stack, without an m or a g. No stack split.
|
|
//
|
|
//go:nosplit
|
|
func badsignal2() {
|
|
pwrite(2, unsafe.Pointer(&_badsignal[0]), int32(len(_badsignal)), -1)
|
|
exits(&_badsignal[0])
|
|
}
|
|
|
|
func raisebadsignal(sig uint32) {
|
|
badsignal2()
|
|
}
|
|
|
|
func _atoi(b []byte) int {
|
|
n := 0
|
|
for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
|
|
n = n*10 + int(b[0]) - '0'
|
|
b = b[1:]
|
|
}
|
|
return n
|
|
}
|
|
|
|
func signame(sig uint32) string {
|
|
if sig >= uint32(len(sigtable)) {
|
|
return ""
|
|
}
|
|
return sigtable[sig].name
|
|
}
|
|
|
|
const preemptMSupported = false
|
|
|
|
func preemptM(mp *m) {
|
|
// Not currently supported.
|
|
//
|
|
// TODO: Use a note like we use signals on POSIX OSes
|
|
}
|
|
|
|
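// readtime reads n big-endian uint64 values from /dev/bintime into
// the array starting at t and returns the number of values read. It
// aborts the program if fewer than min values could be read.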
//go:nosplit
|
|
func readtime(t *uint64, min, n int) int {
|
|
if bintimeFD < 0 {
|
|
fatal("/dev/bintime not opened")
|
|
}
|
|
const uint64size = 8
|
|
r := pread(bintimeFD, unsafe.Pointer(t), int32(n*uint64size), 0)
|
|
if int(r) < min*uint64size {
|
|
fatal("cannot read /dev/bintime")
|
|
}
|
|
return int(r) / uint64size
|
|
}
|
|
|
|
// timesplit returns u/1e9, u%1e9
|
|
func timesplit(u uint64) (sec int64, nsec int32)
|
|
|
|
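// frombe converts a value read from /dev/bintime, which is
// big-endian, to host byte order.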
func frombe(u uint64) uint64 {
	b := (*[8]byte)(unsafe.Pointer(&u))
	return byteorder.BEUint64(b[:])
}

//go:nosplit
func nanotime1() int64 {
	var t [4]uint64
	if readtime(&t[0], 1, 4) == 4 {
		// long read indicates new kernel sending monotonic time
		// (https://github.com/rsc/plan9/commit/baf076425).
		return int64(frombe(t[3]))
	}
	// fall back to unix time
	return int64(frombe(t[0]))
}

//go:nosplit
func walltime() (sec int64, nsec int32) {
	var t [1]uint64
	readtime(&t[0], 1, 1)
	return timesplit(frombe(t[0]))
}