mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
runtime: deduplicate Windows stdcall
There is no need to have a dedicated stdcall variant for each number of
arguments. Instead, we can use a variadic function that accepts any
number of arguments and handles them uniformly.

While here, improve documentation of syscall_syscalln to make it clear
that it should not be used within the runtime package.

Change-Id: I022afc7f28d969fd7307bb2b1f4594246ac38d18
Reviewed-on: https://go-review.googlesource.com/c/go/+/691215
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Mark Freeman <mark@golang.org>
parent ef40549786
commit e666972a67

8 changed files with 120 additions and 186 deletions
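The gist of the change, illustrated outside the runtime: instead of one wrapper per argument count (stdcall0 through stdcall8), a single variadic function packs its arguments into a slice and hands the low-level call the address of the first element plus a count. Below is a minimal, self-contained sketch of that pattern, not the runtime's code; the names callInfo and lowLevelCall are hypothetical stand-ins, and the real runtime pins the argument slice with //go:uintptrkeepalive and abi.NoEscape rather than relying on ordinary Go semantics.

package main

import (
	"fmt"
	"unsafe"
)

// callInfo mirrors the shape of the runtime's StdCallInfo: a function
// "address", an argument count, and the address of the first argument.
// The type and field names are hypothetical, for this sketch only.
type callInfo struct {
	fn   uintptr
	n    uintptr
	args uintptr
}

// lowLevelCall stands in for the assembly trampoline that performs the
// actual stdcall. Here it just walks the packed arguments and sums them,
// to show that the variadic packing round-trips correctly.
func lowLevelCall(c *callInfo) uintptr {
	var sum uintptr
	for i := uintptr(0); i < c.n; i++ {
		p := (*uintptr)(unsafe.Pointer(c.args + i*unsafe.Sizeof(uintptr(0))))
		sum += *p
	}
	return sum
}

// stdcall packs any number of uintptr arguments, replacing fixed-arity
// wrappers with one variadic entry point.
func stdcall(fn uintptr, args ...uintptr) uintptr {
	c := callInfo{fn: fn, n: uintptr(len(args))}
	if len(args) > 0 {
		c.args = uintptr(unsafe.Pointer(&args[0]))
	}
	return lowLevelCall(&c)
}

func main() {
	fmt.Println(stdcall(0))          // no arguments: prints 0
	fmt.Println(stdcall(0, 1, 2, 3)) // 1+2+3: prints 6
}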
@@ -18,7 +18,7 @@ var (
 func NumberOfProcessors() int32 {
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return int32(info.dwnumberofprocessors)
 }
@@ -26,11 +26,11 @@ const (
 //
 //go:nosplit
 func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
-	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
+	return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }

 func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-	r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
+	r := stdcall(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
 	if r != 0 {
 		return
 	}
@@ -46,7 +46,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 	// in the worst case, but that's fast enough.
 	for n > 0 {
 		small := n
-		for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
+		for small >= 4096 && stdcall(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
 			small /= 2
 			small &^= 4096 - 1
 		}
@@ -60,7 +60,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 }

 func sysUsedOS(v unsafe.Pointer, n uintptr) {
-	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
+	p := stdcall(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if p == uintptr(v) {
 		return
 	}
@@ -71,7 +71,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) {
 	k := n
 	for k > 0 {
 		small := k
-		for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
+		for small >= 4096 && stdcall(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
 			small /= 2
 			small &^= 4096 - 1
 		}
@@ -105,7 +105,7 @@ func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
 //
 //go:nosplit
 func sysFreeOS(v unsafe.Pointer, n uintptr) {
-	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
+	r := stdcall(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
 	if r == 0 {
 		print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
 		throw("runtime: failed to release pages")
@@ -121,13 +121,13 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	// v is just a hint.
 	// First try at v.
 	// This will fail if any of [v, v+n) is already reserved.
-	v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
+	v = unsafe.Pointer(stdcall(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
 	if v != nil {
 		return v
 	}

 	// Next let the kernel choose the address.
-	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
+	return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }

 func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
@@ -102,7 +102,7 @@ var (
 )

 func netpollinit() {
-	iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+	iocphandle = stdcall(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
 	if iocphandle == 0 {
 		println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")")
 		throw("runtime: netpollinit failed")
@@ -115,7 +115,7 @@ func netpollIsPollDescriptor(fd uintptr) bool {
 func netpollopen(fd uintptr, pd *pollDesc) int32 {
 	key := packNetpollKey(netpollSourceReady, pd)
-	if stdcall4(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
+	if stdcall(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
 		return int32(getlasterror())
 	}
 	return 0
@@ -137,7 +137,7 @@ func netpollBreak() {
 	}

 	key := packNetpollKey(netpollSourceBreak, nil)
-	if stdcall4(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
+	if stdcall(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
 		println("runtime: netpoll: PostQueuedCompletionStatus failed (errno=", getlasterror(), ")")
 		throw("runtime: netpoll: PostQueuedCompletionStatus failed")
 	}
@@ -197,7 +197,7 @@ func netpoll(delay int64) (gList, int32) {
 	if delay != 0 {
 		mp.blocked = true
 	}
-	if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+	if stdcall(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
 		mp.blocked = false
 		errno := getlasterror()
 		if errno == _WAIT_TIMEOUT {
@@ -256,7 +256,7 @@ func netpollQueueTimer(delay int64) (signaled bool) {
 		// such as a netpollBreak, so we can get to this point with a timer that hasn't
 		// expired yet. In this case, the completion packet can still be picked up by
 		// another thread, so defer the cancellation until it is really necessary.
-		errno := stdcall2(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
+		errno := stdcall(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
 		switch errno {
 		case STATUS_CANCELLED:
 			// STATUS_CANCELLED is returned when the associated timer has already expired,
@@ -264,12 +264,12 @@ func netpollQueueTimer(delay int64) (signaled bool) {
 			fallthrough
 		case STATUS_SUCCESS:
 			dt := -delay / 100 // relative sleep (negative), 100ns units
-			if stdcall6(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
+			if stdcall(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
 				println("runtime: SetWaitableTimer failed; errno=", getlasterror())
 				throw("runtime: netpoll failed")
 			}
 			key := packNetpollKey(netpollSourceTimer, nil)
-			if errno := stdcall8(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
+			if errno := stdcall(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
 				println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno)
 				throw("runtime: netpoll failed")
 			}
@@ -220,7 +220,7 @@ func windowsFindfunc(lib uintptr, name []byte) stdFunction {
 	if name[len(name)-1] != 0 {
 		throw("usage")
 	}
-	f := stdcall2(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
+	f := stdcall(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
 	return stdFunction(unsafe.Pointer(f))
 }

@@ -229,7 +229,7 @@ var sysDirectory [_MAX_PATH + 1]byte
 var sysDirectoryLen uintptr

 func initSysDirectory() {
-	l := stdcall2(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
+	l := stdcall(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
 	if l == 0 || l > uintptr(len(sysDirectory)-1) {
 		throw("Unable to determine system directory")
 	}
@@ -244,20 +244,20 @@ func windows_GetSystemDirectory() string {
 func windowsLoadSystemLib(name []uint16) uintptr {
 	const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
-	return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+	return stdcall(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
 }

 //go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
 func windows_QueryPerformanceCounter() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }

 //go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
 func windows_QueryPerformanceFrequency() int64 {
 	var frequency int64
-	stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
+	stdcall(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
 	return frequency
 }

@@ -308,7 +308,7 @@ func monitorSuspendResume() {
 	var fn any = func(context uintptr, changeType uint32, setting uintptr) uintptr {
 		for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
 			if mp.resumesema != 0 {
-				stdcall1(_SetEvent, mp.resumesema)
+				stdcall(_SetEvent, mp.resumesema)
 			}
 		}
 		return 0
@@ -317,13 +317,13 @@ func monitorSuspendResume() {
 		callback: compileCallback(*efaceOf(&fn), true),
 	}
 	handle := uintptr(0)
-	stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
+	stdcall(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
 		uintptr(unsafe.Pointer(&params)), uintptr(unsafe.Pointer(&handle)))
 }

 func getCPUCount() int32 {
 	var mask, sysmask uintptr
-	ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+	ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
 	if ret != 0 {
 		n := 0
 		maskbits := int(unsafe.Sizeof(mask) * 8)
@@ -338,13 +338,13 @@ func getCPUCount() int32 {
 	}
 	// use GetSystemInfo if GetProcessAffinityMask fails
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return int32(info.dwnumberofprocessors)
 }

 func getPageSize() uintptr {
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return uintptr(info.dwpagesize)
 }

@@ -383,9 +383,9 @@ func osRelax(relax bool) uint32 {
 	}

 	if relax {
-		return uint32(stdcall1(_timeEndPeriod, 1))
+		return uint32(stdcall(_timeEndPeriod, 1))
 	} else {
-		return uint32(stdcall1(_timeBeginPeriod, 1))
+		return uint32(stdcall(_timeBeginPeriod, 1))
 	}
 }
@@ -414,7 +414,7 @@ func createHighResTimer() uintptr {
 		_TIMER_QUERY_STATE = 0x0001
 		_TIMER_MODIFY_STATE = 0x0002
 	)
-	return stdcall4(_CreateWaitableTimerExW, 0, 0,
+	return stdcall(_CreateWaitableTimerExW, 0, 0,
 		_CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
 		_SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
 }
@@ -424,7 +424,7 @@ func initHighResTimer() {
 	if h != 0 {
 		haveHighResTimer = true
 		haveHighResSleep = _NtCreateWaitCompletionPacket != nil
-		stdcall1(_CloseHandle, h)
+		stdcall(_CloseHandle, h)
 	} else {
 		// Only load winmm.dll if we need it.
 		// This avoids a dependency on winmm.dll for Go programs
@@ -456,7 +456,7 @@ func initLongPathSupport() {
 	// Check that we're ≥ 10.0.15063.
 	info := _OSVERSIONINFOW{}
 	info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
-	stdcall1(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
+	stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
 	if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) {
 		return
 	}
@@ -464,7 +464,7 @@ func initLongPathSupport() {
 	// Set the IsLongPathAwareProcess flag of the PEB's bit field.
 	// This flag is not documented, but it's known to be used
 	// by Windows to enable long path support.
-	bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset))
+	bitField := (*byte)(unsafe.Pointer(stdcall(_RtlGetCurrentPeb) + PebBitFieldOffset))
 	*bitField |= IsLongPathAwareProcess

 	canUseLongPaths = true
@@ -493,13 +493,13 @@ func osinit() {
 	// of dedicated threads -- GUI, IO, computational, etc. Go processes use
 	// equivalent threads that all do a mix of GUI, IO, computations, etc.
 	// In such context dynamic priority boosting does nothing but harm, so we turn it off.
-	stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
+	stdcall(_SetProcessPriorityBoost, currentProcess, 1)
 }

 //go:nosplit
 func readRandom(r []byte) int {
 	n := 0
-	if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+	if stdcall(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
 		n = len(r)
 	}
 	return n
@@ -509,7 +509,7 @@ func goenvs() {
 	// strings is a pointer to environment variable pairs in the form:
 	//     "envA=valA\x00envB=valB\x00\x00" (in UTF-16)
 	// Two consecutive zero bytes end the list.
-	strings := unsafe.Pointer(stdcall0(_GetEnvironmentStringsW))
+	strings := unsafe.Pointer(stdcall(_GetEnvironmentStringsW))
 	p := (*[1 << 24]uint16)(strings)[:]

 	n := 0
@@ -533,13 +533,13 @@ func goenvs() {
 		p = p[1:] // skip nil byte
 	}

-	stdcall1(_FreeEnvironmentStringsW, uintptr(strings))
+	stdcall(_FreeEnvironmentStringsW, uintptr(strings))

 	// We call these all the way here, late in init, so that malloc works
 	// for the callback functions these generate.
 	var fn any = ctrlHandler
 	ctrlHandlerPC := compileCallback(*efaceOf(&fn), true)
-	stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
+	stdcall(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)

 	monitorSuspendResume()
 }
@@ -555,7 +555,7 @@ func exit(code int32) {
 	// kills the suspending thread, and then this thread suspends.
 	lock(&suspendLock)
 	atomic.Store(&exiting, 1)
-	stdcall1(_ExitProcess, uintptr(code))
+	stdcall(_ExitProcess, uintptr(code))
 }

 // write1 must be nosplit because it's used as a last resort in
@@ -571,9 +571,9 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 	var handle uintptr
 	switch fd {
 	case 1:
-		handle = stdcall1(_GetStdHandle, _STD_OUTPUT_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_OUTPUT_HANDLE)
 	case 2:
-		handle = stdcall1(_GetStdHandle, _STD_ERROR_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_ERROR_HANDLE)
 	default:
 		// assume fd is real windows handle.
 		handle = fd
@@ -589,7 +589,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 	if !isASCII {
 		var m uint32
-		isConsole := stdcall2(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
+		isConsole := stdcall(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
 		// If this is a console output, various non-unicode code pages can be in use.
 		// Use the dedicated WriteConsole call to ensure unicode is printed correctly.
 		if isConsole {
@@ -597,7 +597,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 		}
 	}
 	var written uint32
-	stdcall5(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
+	stdcall(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
 	return int32(written)
 }
@@ -650,7 +650,7 @@ func writeConsoleUTF16(handle uintptr, b []uint16) {
 		return
 	}
 	var written uint32
-	stdcall5(_WriteConsoleW,
+	stdcall(_WriteConsoleW,
 		handle,
 		uintptr(unsafe.Pointer(&b[0])),
 		uintptr(l),
@@ -671,7 +671,7 @@ func semasleep(ns int64) int32 {
 	var result uintptr
 	if ns < 0 {
-		result = stdcall2(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
+		result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
 	} else {
 		start := nanotime()
 		elapsed := int64(0)
@@ -680,7 +680,7 @@ func semasleep(ns int64) int32 {
 			if ms == 0 {
 				ms = 1
 			}
-			result = stdcall4(_WaitForMultipleObjects, 2,
+			result = stdcall(_WaitForMultipleObjects, 2,
 				uintptr(unsafe.Pointer(&[2]uintptr{getg().m.waitsema, getg().m.resumesema})),
 				0, uintptr(ms))
 			if result != _WAIT_OBJECT_0+1 {
@@ -723,7 +723,7 @@ func semasleep(ns int64) int32 {
 //go:nosplit
 func semawakeup(mp *m) {
-	if stdcall1(_SetEvent, mp.waitsema) == 0 {
+	if stdcall(_SetEvent, mp.waitsema) == 0 {
 		systemstack(func() {
 			print("runtime: setevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semawakeup")
@@ -736,20 +736,20 @@ func semacreate(mp *m) {
 	if mp.waitsema != 0 {
 		return
 	}
-	mp.waitsema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.waitsema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.waitsema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
 	}
-	mp.resumesema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.resumesema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.resumesema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 }
@@ -762,7 +762,7 @@ func semacreate(mp *m) {
 //go:nosplit
 func newosproc(mp *m) {
 	// We pass 0 for the stack size to use the default for this binary.
-	thandle := stdcall6(_CreateThread, 0, 0,
+	thandle := stdcall(_CreateThread, 0, 0,
 		abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
 		0, 0)

@@ -780,7 +780,7 @@ func newosproc(mp *m) {
 	}

 	// Close thandle to avoid leaking the thread object if it exits.
-	stdcall1(_CloseHandle, thandle)
+	stdcall(_CloseHandle, thandle)
 }

 // Used by the C library build mode. On Linux this function would allocate a
@@ -828,7 +828,7 @@ func sigblock(exiting bool) {
 // Called on the new thread, cannot allocate Go memory.
 func minit() {
 	var thandle uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.minit: duplicatehandle failed")
 	}
@@ -836,7 +836,7 @@ func minit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	mp.thread = thandle
-	mp.procid = uint64(stdcall0(_GetCurrentThreadId))
+	mp.procid = uint64(stdcall(_GetCurrentThreadId))

 	// Configure usleep timer, if possible.
 	if mp.highResTimer == 0 && haveHighResTimer {
@@ -853,7 +853,7 @@ func minit() {
 			throw("CreateWaitableTimerEx when creating timer failed")
 		}
 		const GENERIC_ALL = 0x10000000
-		errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
+		errno := stdcall(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
 		if mp.waitIocpHandle == 0 {
 			print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n")
 			throw("NtCreateWaitCompletionPacket failed")
@@ -864,7 +864,7 @@ func minit() {
 	// Query the true stack base from the OS. Currently we're
 	// running on a small assumed stack.
 	var mbi memoryBasicInformation
-	res := stdcall3(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
+	res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
 	if res == 0 {
 		print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n")
 		throw("VirtualQuery for stack base failed")
@@ -896,7 +896,7 @@ func unminit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	if mp.thread != 0 {
-		stdcall1(_CloseHandle, mp.thread)
+		stdcall(_CloseHandle, mp.thread)
 		mp.thread = 0
 	}
 	unlock(&mp.threadLock)
@@ -913,49 +913,59 @@ func unminit() {
 //go:nosplit
 func mdestroy(mp *m) {
 	if mp.highResTimer != 0 {
-		stdcall1(_CloseHandle, mp.highResTimer)
+		stdcall(_CloseHandle, mp.highResTimer)
 		mp.highResTimer = 0
 	}
 	if mp.waitIocpTimer != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpTimer)
+		stdcall(_CloseHandle, mp.waitIocpTimer)
 		mp.waitIocpTimer = 0
 	}
 	if mp.waitIocpHandle != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpHandle)
+		stdcall(_CloseHandle, mp.waitIocpHandle)
 		mp.waitIocpHandle = 0
 	}
 	if mp.waitsema != 0 {
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 	if mp.resumesema != 0 {
-		stdcall1(_CloseHandle, mp.resumesema)
+		stdcall(_CloseHandle, mp.resumesema)
 		mp.resumesema = 0
 	}
 }

-// stdcall_no_g calls asmstdcall on os stack without using g.
+// stdcall_no_g is like [stdcall] but can be called without a G.
 //
-//go:nowritebarrier
 //go:nosplit
-func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr {
+//go:uintptrkeepalive
+func stdcall_no_g(fn stdFunction, args ...uintptr) uintptr {
 	call := windows.StdCallInfo{
 		Fn: uintptr(unsafe.Pointer(fn)),
-		N: uintptr(n),
-		Args: args,
+		N: uintptr(len(args)),
+	}
+	if len(args) > 0 {
+		call.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
 	}
 	windows.StdCall(&call)
 	return call.R1
 }

-// Calling stdcall on os stack.
+// stdcall calls fn with the given arguments using the stdcall calling convention.
+// Must be called from the system stack.
 // May run during STW, so write barriers are not allowed.
 //
 //go:nowritebarrier
 //go:nosplit
-func stdcall(fn stdFunction) uintptr {
+//go:uintptrkeepalive
+func stdcall(fn stdFunction, args ...uintptr) uintptr {
 	gp := getg()
 	mp := gp.m
 	mp.stdCallInfo.Fn = uintptr(unsafe.Pointer(fn))
+	mp.stdCallInfo.N = uintptr(len(args))
+	if len(args) > 0 {
+		mp.stdCallInfo.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
+	}
 	resetLibcall := false
 	if mp.profilehz != 0 && mp.libcallsp == 0 {
 		// leave pc/sp for cpu profiler
@@ -973,105 +983,24 @@ func stdcall(fn stdFunction) uintptr {
 	return mp.stdCallInfo.R1
 }

-//go:nosplit
-func stdcall0(fn stdFunction) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 0
-	mp.stdCallInfo.Args = 0
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall1(fn stdFunction, a0 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 1
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 2
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 3
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 4
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 5
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 6
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 7
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 8
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
 // These must run on the system stack only.

 //go:nosplit
 func osyield_no_g() {
-	stdcall_no_g(_SwitchToThread, 0, 0)
+	stdcall_no_g(_SwitchToThread)
 }

 //go:nosplit
 func osyield() {
 	systemstack(func() {
-		stdcall0(_SwitchToThread)
+		stdcall(_SwitchToThread)
 	})
 }

 //go:nosplit
 func usleep_no_g(us uint32) {
 	timeout := uintptr(us) / 1000 // ms units
-	args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout}
-	stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0]))))
+	stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout)
 }

 //go:nosplit
@@ -1083,13 +1012,13 @@ func usleep(us uint32) {
 		if haveHighResTimer && getg().m.highResTimer != 0 {
 			h = getg().m.highResTimer
 			dt := -10 * int64(us) // relative sleep (negative), 100ns units
-			stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
+			stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
 			timeout = _INFINITE
 		} else {
 			h = _INVALID_HANDLE_VALUE
 			timeout = uintptr(us) / 1000 // ms units
 		}
-		stdcall2(_WaitForSingleObject, h, timeout)
+		stdcall(_WaitForSingleObject, h, timeout)
 	})
 }

@@ -1130,7 +1059,7 @@ func profilem(mp *m, thread uintptr) {
 	c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))

 	c.contextflags = _CONTEXT_CONTROL
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))

 	gp := gFromSP(mp, c.sp())

@@ -1151,10 +1080,10 @@ func gFromSP(mp *m, sp uintptr) *g {
 }

 func profileLoop() {
-	stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
+	stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)

 	for {
-		stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
+		stdcall(_WaitForSingleObject, profiletimer, _INFINITE)
 		first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
 		for mp := first; mp != nil; mp = mp.alllink {
 			if mp == getg().m {
@@ -1172,7 +1101,7 @@ func profileLoop() {
 			}
 			// Acquire our own handle to the thread.
 			var thread uintptr
-			if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+			if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 				print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n")
 				throw("duplicatehandle failed")
 			}
@@ -1182,9 +1111,9 @@ func profileLoop() {
 			// above and the SuspendThread. The handle
 			// will remain valid, but SuspendThread may
 			// fail.
-			if int32(stdcall1(_SuspendThread, thread)) == -1 {
+			if int32(stdcall(_SuspendThread, thread)) == -1 {
 				// The thread no longer exists.
-				stdcall1(_CloseHandle, thread)
+				stdcall(_CloseHandle, thread)
 				continue
 			}
 			if mp.profilehz != 0 && !mp.blocked {
@@ -1192,8 +1121,8 @@ func profileLoop() {
 				// was in the process of shutting down.
 				profilem(mp, thread)
 			}
-			stdcall1(_ResumeThread, thread)
-			stdcall1(_CloseHandle, thread)
+			stdcall(_ResumeThread, thread)
+			stdcall(_CloseHandle, thread)
 		}
 	}
 }
@@ -1204,7 +1133,7 @@ func setProcessCPUProfiler(hz int32) {
 		if haveHighResTimer {
 			timer = createHighResTimer()
 		} else {
-			timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+			timer = stdcall(_CreateWaitableTimerA, 0, 0, 0)
 		}
 		atomic.Storeuintptr(&profiletimer, timer)
 		newm(profileLoop, nil, -1)
@@ -1221,7 +1150,7 @@ func setThreadCPUProfiler(hz int32) {
 		}
 		due = int64(ms) * -10000
 	}
-	stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
+	stdcall(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
 	atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
 }

@@ -1254,7 +1183,7 @@ func preemptM(mp *m) {
 		return
 	}
 	var thread uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.preemptM: duplicatehandle failed")
 	}
@@ -1274,9 +1203,9 @@ func preemptM(mp *m) {
 	lock(&suspendLock)

 	// Suspend the thread.
-	if int32(stdcall1(_SuspendThread, thread)) == -1 {
+	if int32(stdcall(_SuspendThread, thread)) == -1 {
 		unlock(&suspendLock)
-		stdcall1(_CloseHandle, thread)
+		stdcall(_CloseHandle, thread)
 		atomic.Store(&mp.preemptExtLock, 0)
 		// The thread no longer exists. This shouldn't be
 		// possible, but just acknowledge the request.
@@ -1293,7 +1222,7 @@ func preemptM(mp *m) {
 	// We have to get the thread context before inspecting the M
 	// because SuspendThread only requests a suspend.
 	// GetThreadContext actually blocks until it's suspended.
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))

 	unlock(&suspendLock)

@@ -1304,7 +1233,7 @@ func preemptM(mp *m) {
 		// Inject call to asyncPreempt
 		targetPC := abi.FuncPCABI0(asyncPreempt)
 		c.pushCall(targetPC, resumePC)
-		stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+		stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 	}
 }

@@ -1313,8 +1242,8 @@ func preemptM(mp *m) {
 	// Acknowledge the preemption.
 	mp.preemptGen.Add(1)

-	stdcall1(_ResumeThread, thread)
-	stdcall1(_CloseHandle, thread)
+	stdcall(_ResumeThread, thread)
+	stdcall(_CloseHandle, thread)
 }

 // osPreemptExtEnter is called before entering external code that may
@@ -9,7 +9,7 @@ import "unsafe"
 //go:nosplit
 func cputicks() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
@@ -9,6 +9,6 @@ import "unsafe"
 //go:nosplit
 func cputicks() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
@@ -18,24 +18,24 @@ const (
 )

 func preventErrorDialogs() {
-	errormode := stdcall0(_GetErrorMode)
-	stdcall1(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
+	errormode := stdcall(_GetErrorMode)
+	stdcall(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)

 	// Disable WER fault reporting UI.
 	// Do this even if WER is disabled as a whole,
 	// as WER might be enabled later with setTraceback("wer")
 	// and we still want the fault reporting UI to be disabled if this happens.
 	var werflags uintptr
-	stdcall2(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
-	stdcall1(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
+	stdcall(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
+	stdcall(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
 }

 // enableWER re-enables Windows error reporting without fault reporting UI.
 func enableWER() {
 	// re-enable Windows Error Reporting
-	errormode := stdcall0(_GetErrorMode)
+	errormode := stdcall(_GetErrorMode)
 	if errormode&_SEM_NOGPFAULTERRORBOX != 0 {
-		stdcall1(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
+		stdcall(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
 	}
 }

@@ -47,14 +47,14 @@ func sehtramp()
 func sigresume()

 func initExceptionHandler() {
-	stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
+	stdcall(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
 	if GOARCH == "386" {
 		// use SetUnhandledExceptionFilter for windows-386.
 		// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
-		stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
+		stdcall(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
 	} else {
-		stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
-		stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
+		stdcall(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+		stdcall(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
 	}
 }

@@ -279,11 +279,11 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON
 	ctxt := dctxt.ctx()
 	var base, sp uintptr
 	for {
-		entry := stdcall3(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
+		entry := stdcall(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
 		if entry == 0 {
 			break
 		}
-		stdcall8(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
+		stdcall(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
 		if sp < gp.stack.lo || gp.stack.hi <= sp {
 			break
 		}
@@ -467,7 +467,7 @@ func dieFromException(info *exceptionrecord, r *context) {
 		}
 	}
 	const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = 0x1
-	stdcall3(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
+	stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
 }

 // gsignalStack is unused on Windows.
@@ -412,6 +412,11 @@ func callbackWrap(a *callbackArgs) {
 	}
 }

+// syscall_syscalln calls fn with args[:n].
+// It is used to implement [syscall.SyscallN].
+// It shouldn't be used in the runtime package,
+// use [stdcall] instead.
+//
 //go:linkname syscall_syscalln syscall.syscalln
 //go:nosplit
 //go:uintptrkeepalive