Mirror of https://github.com/golang/go.git, synced 2025-12-08 06:10:04 +00:00
runtime: deduplicate Windows stdcall
There is no need to have a dedicated stdcall variant for each number of
arguments. Instead, we can use a variadic function that accepts any
number of arguments and handles them uniformly.

While here, improve documentation of syscall_syscalln to make it clear
that it should not be used within the runtime package.

Change-Id: I022afc7f28d969fd7307bb2b1f4594246ac38d18
Reviewed-on: https://go-review.googlesource.com/c/go/+/691215
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Mark Freeman <mark@golang.org>
parent ef40549786
commit e666972a67
8 changed files with 120 additions and 186 deletions
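The change is mechanical at every call site: the arity-specific wrappers disappear and their arguments move into a single variadic call. A condensed before/after excerpt from the hunks below, shown here only for orientation (runtime-internal code, not usable outside the runtime):

	// before: one wrapper per argument count
	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
	stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)

	// after: one variadic helper covers every arity
	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
	stdcall(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)

Internally, the variadic stdcall records len(args) and a pointer to the argument array in the M's StdCallInfo before dispatching to the assembly trampoline, with //go:uintptrkeepalive keeping the uintptr-typed handles alive across the call (see the large hunk at old line 913 below).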
@@ -18,7 +18,7 @@ var (
 
 func NumberOfProcessors() int32 {
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return int32(info.dwnumberofprocessors)
 }
 
@@ -26,11 +26,11 @@ const (
 //
 //go:nosplit
 func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
-	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
+	return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }
 
 func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-	r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
+	r := stdcall(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
 	if r != 0 {
 		return
 	}
@@ -46,7 +46,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 	// in the worst case, but that's fast enough.
 	for n > 0 {
 		small := n
-		for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
+		for small >= 4096 && stdcall(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
 			small /= 2
 			small &^= 4096 - 1
 		}
@@ -60,7 +60,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 }
 
 func sysUsedOS(v unsafe.Pointer, n uintptr) {
-	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
+	p := stdcall(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if p == uintptr(v) {
 		return
 	}
@@ -71,7 +71,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) {
 	k := n
 	for k > 0 {
 		small := k
-		for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
+		for small >= 4096 && stdcall(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
 			small /= 2
 			small &^= 4096 - 1
 		}
@@ -105,7 +105,7 @@ func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
 //
 //go:nosplit
 func sysFreeOS(v unsafe.Pointer, n uintptr) {
-	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
+	r := stdcall(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
 	if r == 0 {
 		print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
 		throw("runtime: failed to release pages")
@@ -121,13 +121,13 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	// v is just a hint.
 	// First try at v.
 	// This will fail if any of [v, v+n) is already reserved.
-	v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
+	v = unsafe.Pointer(stdcall(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
 	if v != nil {
 		return v
 	}
 
 	// Next let the kernel choose the address.
-	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
+	return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }
 
 func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
@@ -102,7 +102,7 @@ var (
 )
 
 func netpollinit() {
-	iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+	iocphandle = stdcall(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
 	if iocphandle == 0 {
 		println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")")
 		throw("runtime: netpollinit failed")
@@ -115,7 +115,7 @@ func netpollIsPollDescriptor(fd uintptr) bool {
 
 func netpollopen(fd uintptr, pd *pollDesc) int32 {
 	key := packNetpollKey(netpollSourceReady, pd)
-	if stdcall4(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
+	if stdcall(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
 		return int32(getlasterror())
 	}
 	return 0
@@ -137,7 +137,7 @@ func netpollBreak() {
 	}
 
 	key := packNetpollKey(netpollSourceBreak, nil)
-	if stdcall4(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
+	if stdcall(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
 		println("runtime: netpoll: PostQueuedCompletionStatus failed (errno=", getlasterror(), ")")
 		throw("runtime: netpoll: PostQueuedCompletionStatus failed")
 	}
@@ -197,7 +197,7 @@ func netpoll(delay int64) (gList, int32) {
 	if delay != 0 {
 		mp.blocked = true
 	}
-	if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+	if stdcall(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
 		mp.blocked = false
 		errno := getlasterror()
 		if errno == _WAIT_TIMEOUT {
@@ -256,7 +256,7 @@ func netpollQueueTimer(delay int64) (signaled bool) {
 	// such as a netpollBreak, so we can get to this point with a timer that hasn't
 	// expired yet. In this case, the completion packet can still be picked up by
 	// another thread, so defer the cancellation until it is really necessary.
-	errno := stdcall2(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
+	errno := stdcall(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
 	switch errno {
 	case STATUS_CANCELLED:
 		// STATUS_CANCELLED is returned when the associated timer has already expired,
@@ -264,12 +264,12 @@ func netpollQueueTimer(delay int64) (signaled bool) {
 		fallthrough
 	case STATUS_SUCCESS:
 		dt := -delay / 100 // relative sleep (negative), 100ns units
-		if stdcall6(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
+		if stdcall(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
 			println("runtime: SetWaitableTimer failed; errno=", getlasterror())
 			throw("runtime: netpoll failed")
 		}
 		key := packNetpollKey(netpollSourceTimer, nil)
-		if errno := stdcall8(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
+		if errno := stdcall(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
 			println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno)
 			throw("runtime: netpoll failed")
 		}
@@ -220,7 +220,7 @@ func windowsFindfunc(lib uintptr, name []byte) stdFunction {
 	if name[len(name)-1] != 0 {
 		throw("usage")
 	}
-	f := stdcall2(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
+	f := stdcall(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
 	return stdFunction(unsafe.Pointer(f))
 }
 
@@ -229,7 +229,7 @@ var sysDirectory [_MAX_PATH + 1]byte
 var sysDirectoryLen uintptr
 
 func initSysDirectory() {
-	l := stdcall2(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
+	l := stdcall(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
 	if l == 0 || l > uintptr(len(sysDirectory)-1) {
 		throw("Unable to determine system directory")
 	}
@@ -244,20 +244,20 @@ func windows_GetSystemDirectory() string {
 
 func windowsLoadSystemLib(name []uint16) uintptr {
 	const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
-	return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+	return stdcall(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
 }
 
 //go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
 func windows_QueryPerformanceCounter() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
 
 //go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
 func windows_QueryPerformanceFrequency() int64 {
 	var frequency int64
-	stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
+	stdcall(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
 	return frequency
 }
 
@@ -308,7 +308,7 @@ func monitorSuspendResume() {
 	var fn any = func(context uintptr, changeType uint32, setting uintptr) uintptr {
 		for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
 			if mp.resumesema != 0 {
-				stdcall1(_SetEvent, mp.resumesema)
+				stdcall(_SetEvent, mp.resumesema)
 			}
 		}
 		return 0
@@ -317,13 +317,13 @@ func monitorSuspendResume() {
 		callback: compileCallback(*efaceOf(&fn), true),
 	}
 	handle := uintptr(0)
-	stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
+	stdcall(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
 		uintptr(unsafe.Pointer(&params)), uintptr(unsafe.Pointer(&handle)))
 }
 
 func getCPUCount() int32 {
 	var mask, sysmask uintptr
-	ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+	ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
 	if ret != 0 {
 		n := 0
 		maskbits := int(unsafe.Sizeof(mask) * 8)
@@ -338,13 +338,13 @@ func getCPUCount() int32 {
 	}
 	// use GetSystemInfo if GetProcessAffinityMask fails
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return int32(info.dwnumberofprocessors)
 }
 
 func getPageSize() uintptr {
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return uintptr(info.dwpagesize)
 }
 
@@ -383,9 +383,9 @@ func osRelax(relax bool) uint32 {
 	}
 
 	if relax {
-		return uint32(stdcall1(_timeEndPeriod, 1))
+		return uint32(stdcall(_timeEndPeriod, 1))
 	} else {
-		return uint32(stdcall1(_timeBeginPeriod, 1))
+		return uint32(stdcall(_timeBeginPeriod, 1))
 	}
 }
 
@@ -414,7 +414,7 @@ func createHighResTimer() uintptr {
 		_TIMER_QUERY_STATE = 0x0001
 		_TIMER_MODIFY_STATE = 0x0002
 	)
-	return stdcall4(_CreateWaitableTimerExW, 0, 0,
+	return stdcall(_CreateWaitableTimerExW, 0, 0,
 		_CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
 		_SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
 }
@@ -424,7 +424,7 @@ func initHighResTimer() {
 	if h != 0 {
 		haveHighResTimer = true
 		haveHighResSleep = _NtCreateWaitCompletionPacket != nil
-		stdcall1(_CloseHandle, h)
+		stdcall(_CloseHandle, h)
 	} else {
 		// Only load winmm.dll if we need it.
 		// This avoids a dependency on winmm.dll for Go programs
@@ -456,7 +456,7 @@ func initLongPathSupport() {
 	// Check that we're ≥ 10.0.15063.
 	info := _OSVERSIONINFOW{}
 	info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
-	stdcall1(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
+	stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
 	if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) {
 		return
 	}
@@ -464,7 +464,7 @@ func initLongPathSupport() {
 	// Set the IsLongPathAwareProcess flag of the PEB's bit field.
 	// This flag is not documented, but it's known to be used
 	// by Windows to enable long path support.
-	bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset))
+	bitField := (*byte)(unsafe.Pointer(stdcall(_RtlGetCurrentPeb) + PebBitFieldOffset))
 	*bitField |= IsLongPathAwareProcess
 
 	canUseLongPaths = true
@@ -493,13 +493,13 @@ func osinit() {
 	// of dedicated threads -- GUI, IO, computational, etc. Go processes use
 	// equivalent threads that all do a mix of GUI, IO, computations, etc.
 	// In such context dynamic priority boosting does nothing but harm, so we turn it off.
-	stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
+	stdcall(_SetProcessPriorityBoost, currentProcess, 1)
 }
 
 //go:nosplit
 func readRandom(r []byte) int {
 	n := 0
-	if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+	if stdcall(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
 		n = len(r)
 	}
 	return n
@@ -509,7 +509,7 @@ func goenvs() {
 	// strings is a pointer to environment variable pairs in the form:
 	//     "envA=valA\x00envB=valB\x00\x00" (in UTF-16)
 	// Two consecutive zero bytes end the list.
-	strings := unsafe.Pointer(stdcall0(_GetEnvironmentStringsW))
+	strings := unsafe.Pointer(stdcall(_GetEnvironmentStringsW))
 	p := (*[1 << 24]uint16)(strings)[:]
 
 	n := 0
@@ -533,13 +533,13 @@ func goenvs() {
 		p = p[1:] // skip nil byte
 	}
 
-	stdcall1(_FreeEnvironmentStringsW, uintptr(strings))
+	stdcall(_FreeEnvironmentStringsW, uintptr(strings))
 
 	// We call these all the way here, late in init, so that malloc works
 	// for the callback functions these generate.
 	var fn any = ctrlHandler
 	ctrlHandlerPC := compileCallback(*efaceOf(&fn), true)
-	stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
+	stdcall(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
 
 	monitorSuspendResume()
 }
@@ -555,7 +555,7 @@ func exit(code int32) {
 	// kills the suspending thread, and then this thread suspends.
 	lock(&suspendLock)
 	atomic.Store(&exiting, 1)
-	stdcall1(_ExitProcess, uintptr(code))
+	stdcall(_ExitProcess, uintptr(code))
 }
 
 // write1 must be nosplit because it's used as a last resort in
@@ -571,9 +571,9 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 	var handle uintptr
 	switch fd {
 	case 1:
-		handle = stdcall1(_GetStdHandle, _STD_OUTPUT_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_OUTPUT_HANDLE)
 	case 2:
-		handle = stdcall1(_GetStdHandle, _STD_ERROR_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_ERROR_HANDLE)
 	default:
 		// assume fd is real windows handle.
 		handle = fd
@@ -589,7 +589,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 
 	if !isASCII {
 		var m uint32
-		isConsole := stdcall2(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
+		isConsole := stdcall(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
 		// If this is a console output, various non-unicode code pages can be in use.
 		// Use the dedicated WriteConsole call to ensure unicode is printed correctly.
 		if isConsole {
@@ -597,7 +597,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 		}
 	}
 	var written uint32
-	stdcall5(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
+	stdcall(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
 	return int32(written)
 }
 
@@ -650,7 +650,7 @@ func writeConsoleUTF16(handle uintptr, b []uint16) {
 		return
 	}
 	var written uint32
-	stdcall5(_WriteConsoleW,
+	stdcall(_WriteConsoleW,
 		handle,
 		uintptr(unsafe.Pointer(&b[0])),
 		uintptr(l),
@@ -671,7 +671,7 @@ func semasleep(ns int64) int32 {
 
 	var result uintptr
 	if ns < 0 {
-		result = stdcall2(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
+		result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
 	} else {
 		start := nanotime()
 		elapsed := int64(0)
@@ -680,7 +680,7 @@ func semasleep(ns int64) int32 {
 			if ms == 0 {
 				ms = 1
 			}
-			result = stdcall4(_WaitForMultipleObjects, 2,
+			result = stdcall(_WaitForMultipleObjects, 2,
 				uintptr(unsafe.Pointer(&[2]uintptr{getg().m.waitsema, getg().m.resumesema})),
 				0, uintptr(ms))
 			if result != _WAIT_OBJECT_0+1 {
@@ -723,7 +723,7 @@ func semasleep(ns int64) int32 {
 
 //go:nosplit
 func semawakeup(mp *m) {
-	if stdcall1(_SetEvent, mp.waitsema) == 0 {
+	if stdcall(_SetEvent, mp.waitsema) == 0 {
 		systemstack(func() {
 			print("runtime: setevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semawakeup")
@@ -736,20 +736,20 @@ func semacreate(mp *m) {
 	if mp.waitsema != 0 {
 		return
 	}
-	mp.waitsema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.waitsema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.waitsema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
 	}
-	mp.resumesema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.resumesema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.resumesema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 }
@@ -762,7 +762,7 @@ func semacreate(mp *m) {
 //go:nosplit
 func newosproc(mp *m) {
 	// We pass 0 for the stack size to use the default for this binary.
-	thandle := stdcall6(_CreateThread, 0, 0,
+	thandle := stdcall(_CreateThread, 0, 0,
 		abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
 		0, 0)
 
@@ -780,7 +780,7 @@ func newosproc(mp *m) {
 	}
 
 	// Close thandle to avoid leaking the thread object if it exits.
-	stdcall1(_CloseHandle, thandle)
+	stdcall(_CloseHandle, thandle)
 }
 
 // Used by the C library build mode. On Linux this function would allocate a
@@ -828,7 +828,7 @@ func sigblock(exiting bool) {
 // Called on the new thread, cannot allocate Go memory.
 func minit() {
 	var thandle uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.minit: duplicatehandle failed")
 	}
@@ -836,7 +836,7 @@ func minit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	mp.thread = thandle
-	mp.procid = uint64(stdcall0(_GetCurrentThreadId))
+	mp.procid = uint64(stdcall(_GetCurrentThreadId))
 
 	// Configure usleep timer, if possible.
 	if mp.highResTimer == 0 && haveHighResTimer {
@@ -853,7 +853,7 @@ func minit() {
 			throw("CreateWaitableTimerEx when creating timer failed")
 		}
 		const GENERIC_ALL = 0x10000000
-		errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
+		errno := stdcall(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
 		if mp.waitIocpHandle == 0 {
 			print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n")
 			throw("NtCreateWaitCompletionPacket failed")
@@ -864,7 +864,7 @@ func minit() {
 	// Query the true stack base from the OS. Currently we're
 	// running on a small assumed stack.
 	var mbi memoryBasicInformation
-	res := stdcall3(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
+	res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
 	if res == 0 {
 		print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n")
 		throw("VirtualQuery for stack base failed")
@@ -896,7 +896,7 @@ func unminit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	if mp.thread != 0 {
-		stdcall1(_CloseHandle, mp.thread)
+		stdcall(_CloseHandle, mp.thread)
 		mp.thread = 0
 	}
 	unlock(&mp.threadLock)
@@ -913,49 +913,59 @@ func unminit() {
 //go:nosplit
 func mdestroy(mp *m) {
 	if mp.highResTimer != 0 {
-		stdcall1(_CloseHandle, mp.highResTimer)
+		stdcall(_CloseHandle, mp.highResTimer)
 		mp.highResTimer = 0
 	}
 	if mp.waitIocpTimer != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpTimer)
+		stdcall(_CloseHandle, mp.waitIocpTimer)
 		mp.waitIocpTimer = 0
 	}
 	if mp.waitIocpHandle != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpHandle)
+		stdcall(_CloseHandle, mp.waitIocpHandle)
 		mp.waitIocpHandle = 0
 	}
 	if mp.waitsema != 0 {
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 	if mp.resumesema != 0 {
-		stdcall1(_CloseHandle, mp.resumesema)
+		stdcall(_CloseHandle, mp.resumesema)
 		mp.resumesema = 0
 	}
 }
 
-// stdcall_no_g calls asmstdcall on os stack without using g.
+// stdcall_no_g is like [stdcall] but can be called without a G.
 //
+//go:nowritebarrier
 //go:nosplit
-func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr {
+//go:uintptrkeepalive
+func stdcall_no_g(fn stdFunction, args ...uintptr) uintptr {
 	call := windows.StdCallInfo{
 		Fn: uintptr(unsafe.Pointer(fn)),
-		N:    uintptr(n),
-		Args: args,
+		N:  uintptr(len(args)),
+	}
+	if len(args) > 0 {
+		call.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
 	}
 	windows.StdCall(&call)
 	return call.R1
 }
 
-// Calling stdcall on os stack.
+// stdcall calls fn with the given arguments using the stdcall calling convention.
+// Must be called from the system stack.
 // May run during STW, so write barriers are not allowed.
 //
 //go:nowritebarrier
 //go:nosplit
-func stdcall(fn stdFunction) uintptr {
+//go:uintptrkeepalive
+func stdcall(fn stdFunction, args ...uintptr) uintptr {
 	gp := getg()
 	mp := gp.m
 	mp.stdCallInfo.Fn = uintptr(unsafe.Pointer(fn))
+	mp.stdCallInfo.N = uintptr(len(args))
+	if len(args) > 0 {
+		mp.stdCallInfo.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
+	}
 	resetLibcall := false
 	if mp.profilehz != 0 && mp.libcallsp == 0 {
 		// leave pc/sp for cpu profiler
@@ -973,105 +983,24 @@ func stdcall(fn stdFunction) uintptr {
 	return mp.stdCallInfo.R1
 }
 
-//go:nosplit
-func stdcall0(fn stdFunction) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 0
-	mp.stdCallInfo.Args = 0
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall1(fn stdFunction, a0 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 1
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 2
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 3
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 4
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 5
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 6
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 7
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 8
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
 // These must run on the system stack only.
 
 //go:nosplit
 func osyield_no_g() {
-	stdcall_no_g(_SwitchToThread, 0, 0)
+	stdcall_no_g(_SwitchToThread)
 }
 
 //go:nosplit
 func osyield() {
 	systemstack(func() {
-		stdcall0(_SwitchToThread)
+		stdcall(_SwitchToThread)
 	})
 }
 
 //go:nosplit
 func usleep_no_g(us uint32) {
 	timeout := uintptr(us) / 1000 // ms units
-	args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout}
-	stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0]))))
+	stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout)
 }
 
 //go:nosplit
@@ -1083,13 +1012,13 @@ func usleep(us uint32) {
 		if haveHighResTimer && getg().m.highResTimer != 0 {
 			h = getg().m.highResTimer
 			dt := -10 * int64(us) // relative sleep (negative), 100ns units
-			stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
+			stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
 			timeout = _INFINITE
 		} else {
 			h = _INVALID_HANDLE_VALUE
 			timeout = uintptr(us) / 1000 // ms units
 		}
-		stdcall2(_WaitForSingleObject, h, timeout)
+		stdcall(_WaitForSingleObject, h, timeout)
 	})
 }
 
@@ -1130,7 +1059,7 @@ func profilem(mp *m, thread uintptr) {
 	c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
 
 	c.contextflags = _CONTEXT_CONTROL
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 
 	gp := gFromSP(mp, c.sp())
 
@@ -1151,10 +1080,10 @@ func gFromSP(mp *m, sp uintptr) *g {
 }
 
 func profileLoop() {
-	stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
+	stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
 
 	for {
-		stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
+		stdcall(_WaitForSingleObject, profiletimer, _INFINITE)
 		first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
 		for mp := first; mp != nil; mp = mp.alllink {
 			if mp == getg().m {
@@ -1172,7 +1101,7 @@ func profileLoop() {
 			}
 			// Acquire our own handle to the thread.
 			var thread uintptr
-			if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+			if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
				print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n")
				throw("duplicatehandle failed")
			}
@@ -1182,9 +1111,9 @@ func profileLoop() {
 			// above and the SuspendThread. The handle
 			// will remain valid, but SuspendThread may
 			// fail.
-			if int32(stdcall1(_SuspendThread, thread)) == -1 {
+			if int32(stdcall(_SuspendThread, thread)) == -1 {
 				// The thread no longer exists.
-				stdcall1(_CloseHandle, thread)
+				stdcall(_CloseHandle, thread)
 				continue
 			}
 			if mp.profilehz != 0 && !mp.blocked {
@@ -1192,8 +1121,8 @@ func profileLoop() {
 				// was in the process of shutting down.
 				profilem(mp, thread)
 			}
-			stdcall1(_ResumeThread, thread)
-			stdcall1(_CloseHandle, thread)
+			stdcall(_ResumeThread, thread)
+			stdcall(_CloseHandle, thread)
 		}
 	}
 }
@@ -1204,7 +1133,7 @@ func setProcessCPUProfiler(hz int32) {
 		if haveHighResTimer {
 			timer = createHighResTimer()
 		} else {
-			timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+			timer = stdcall(_CreateWaitableTimerA, 0, 0, 0)
 		}
 		atomic.Storeuintptr(&profiletimer, timer)
 		newm(profileLoop, nil, -1)
@@ -1221,7 +1150,7 @@ func setThreadCPUProfiler(hz int32) {
 		}
 		due = int64(ms) * -10000
 	}
-	stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
+	stdcall(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
 	atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
 }
 
@@ -1254,7 +1183,7 @@ func preemptM(mp *m) {
 		return
 	}
 	var thread uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.preemptM: duplicatehandle failed")
 	}
@@ -1274,9 +1203,9 @@ func preemptM(mp *m) {
 	lock(&suspendLock)
 
 	// Suspend the thread.
-	if int32(stdcall1(_SuspendThread, thread)) == -1 {
+	if int32(stdcall(_SuspendThread, thread)) == -1 {
 		unlock(&suspendLock)
-		stdcall1(_CloseHandle, thread)
+		stdcall(_CloseHandle, thread)
 		atomic.Store(&mp.preemptExtLock, 0)
 		// The thread no longer exists. This shouldn't be
 		// possible, but just acknowledge the request.
@@ -1293,7 +1222,7 @@ func preemptM(mp *m) {
 	// We have to get the thread context before inspecting the M
 	// because SuspendThread only requests a suspend.
 	// GetThreadContext actually blocks until it's suspended.
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 
 	unlock(&suspendLock)
 
@@ -1304,7 +1233,7 @@ func preemptM(mp *m) {
 		// Inject call to asyncPreempt
 		targetPC := abi.FuncPCABI0(asyncPreempt)
 		c.pushCall(targetPC, resumePC)
-		stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+		stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 	}
 }
 
@@ -1313,8 +1242,8 @@ func preemptM(mp *m) {
 	// Acknowledge the preemption.
 	mp.preemptGen.Add(1)
 
-	stdcall1(_ResumeThread, thread)
-	stdcall1(_CloseHandle, thread)
+	stdcall(_ResumeThread, thread)
+	stdcall(_CloseHandle, thread)
 }
 
 // osPreemptExtEnter is called before entering external code that may
@@ -9,7 +9,7 @@ import "unsafe"
 //go:nosplit
 func cputicks() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
 
@@ -9,6 +9,6 @@ import "unsafe"
 //go:nosplit
 func cputicks() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
@@ -18,24 +18,24 @@ const (
 )
 
 func preventErrorDialogs() {
-	errormode := stdcall0(_GetErrorMode)
-	stdcall1(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
+	errormode := stdcall(_GetErrorMode)
+	stdcall(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
 
 	// Disable WER fault reporting UI.
 	// Do this even if WER is disabled as a whole,
 	// as WER might be enabled later with setTraceback("wer")
 	// and we still want the fault reporting UI to be disabled if this happens.
 	var werflags uintptr
-	stdcall2(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
-	stdcall1(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
+	stdcall(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
+	stdcall(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
 }
 
 // enableWER re-enables Windows error reporting without fault reporting UI.
 func enableWER() {
 	// re-enable Windows Error Reporting
-	errormode := stdcall0(_GetErrorMode)
+	errormode := stdcall(_GetErrorMode)
 	if errormode&_SEM_NOGPFAULTERRORBOX != 0 {
-		stdcall1(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
+		stdcall(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
 	}
 }
 
@@ -47,14 +47,14 @@ func sehtramp()
 func sigresume()
 
 func initExceptionHandler() {
-	stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
+	stdcall(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
 	if GOARCH == "386" {
 		// use SetUnhandledExceptionFilter for windows-386.
 		// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
-		stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
+		stdcall(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
 	} else {
-		stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
-		stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
+		stdcall(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+		stdcall(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
 	}
 }
 
@@ -279,11 +279,11 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON
 	ctxt := dctxt.ctx()
 	var base, sp uintptr
 	for {
-		entry := stdcall3(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
+		entry := stdcall(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
 		if entry == 0 {
 			break
 		}
-		stdcall8(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
+		stdcall(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
 		if sp < gp.stack.lo || gp.stack.hi <= sp {
 			break
 		}
@@ -467,7 +467,7 @@ func dieFromException(info *exceptionrecord, r *context) {
 		}
 	}
 	const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = 0x1
-	stdcall3(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
+	stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
 }
 
 // gsignalStack is unused on Windows.
@@ -412,6 +412,11 @@ func callbackWrap(a *callbackArgs) {
 	}
 }
 
+// syscall_syscalln calls fn with args[:n].
+// It is used to implement [syscall.SyscallN].
+// It shouldn't be used in the runtime package,
+// use [stdcall] instead.
+//
 //go:linkname syscall_syscalln syscall.syscalln
 //go:nosplit
 //go:uintptrkeepalive
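The new doc comment above draws the boundary explicitly: inside the runtime, call the variadic stdcall; everywhere else, keep going through the public [syscall.SyscallN], which syscall_syscalln implements. A minimal sketch of the user-facing side, assuming kernel32's GetTickCount64 purely as an illustrative API (it is not part of this diff):

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Outside the runtime, syscall.SyscallN remains the supported way to make
	// an arbitrary stdcall-style call into a Windows DLL.
	kernel32, err := syscall.LoadLibrary("kernel32.dll")
	if err != nil {
		panic(err)
	}
	defer syscall.FreeLibrary(kernel32)

	proc, err := syscall.GetProcAddress(kernel32, "GetTickCount64")
	if err != nil {
		panic(err)
	}

	// GetTickCount64 takes no parameters; SyscallN accepts any number of uintptr arguments.
	ticks, _, _ := syscall.SyscallN(proc)
	fmt.Println("milliseconds since boot:", ticks)
}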