[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: Iba19903f0565b11c648e1fa6effc07b8f97dc322
Roland Shoemaker 2020-11-18 10:55:34 -08:00
commit 906d6e362b
1114 changed files with 59665 additions and 28332 deletions


@@ -2,12 +2,56 @@ pkg encoding/json, method (*RawMessage) MarshalJSON() ([]uint8, error)
pkg math/big, const MaxBase = 36
pkg math/big, type Word uintptr
pkg net, func ListenUnixgram(string, *UnixAddr) (*UDPConn, error)
pkg os (linux-arm), const O_SYNC = 1052672
pkg os (linux-arm), const O_SYNC = 4096
pkg os (linux-arm-cgo), const O_SYNC = 1052672
pkg os (linux-arm-cgo), const O_SYNC = 4096
pkg os, const ModeAppend FileMode
pkg os, const ModeCharDevice FileMode
pkg os, const ModeDevice FileMode
pkg os, const ModeDir FileMode
pkg os, const ModeExclusive FileMode
pkg os, const ModeIrregular FileMode
pkg os, const ModeNamedPipe FileMode
pkg os, const ModePerm FileMode
pkg os, const ModeSetgid FileMode
pkg os, const ModeSetuid FileMode
pkg os, const ModeSocket FileMode
pkg os, const ModeSticky FileMode
pkg os, const ModeSymlink FileMode
pkg os, const ModeTemporary FileMode
pkg os, const ModeType = 2399141888
pkg os, const ModeType = 2399666176
pkg os (linux-arm), const O_SYNC = 4096
pkg os (linux-arm-cgo), const O_SYNC = 4096
pkg os (linux-arm), const O_SYNC = 1052672
pkg os (linux-arm-cgo), const O_SYNC = 1052672
pkg os, const ModeType FileMode
pkg os, func Chmod(string, FileMode) error
pkg os, func Lstat(string) (FileInfo, error)
pkg os, func Mkdir(string, FileMode) error
pkg os, func MkdirAll(string, FileMode) error
pkg os, func OpenFile(string, int, FileMode) (*File, error)
pkg os, func SameFile(FileInfo, FileInfo) bool
pkg os, func Stat(string) (FileInfo, error)
pkg os, method (*File) Chmod(FileMode) error
pkg os, method (*File) Readdir(int) ([]FileInfo, error)
pkg os, method (*File) Stat() (FileInfo, error)
pkg os, method (*PathError) Error() string
pkg os, method (*PathError) Timeout() bool
pkg os, method (*PathError) Unwrap() error
pkg os, method (FileMode) IsDir() bool
pkg os, method (FileMode) IsRegular() bool
pkg os, method (FileMode) Perm() FileMode
pkg os, method (FileMode) String() string
pkg os, type FileInfo interface { IsDir, ModTime, Mode, Name, Size, Sys }
pkg os, type FileInfo interface, IsDir() bool
pkg os, type FileInfo interface, ModTime() time.Time
pkg os, type FileInfo interface, Mode() FileMode
pkg os, type FileInfo interface, Name() string
pkg os, type FileInfo interface, Size() int64
pkg os, type FileInfo interface, Sys() interface{}
pkg os, type FileMode uint32
pkg os, type PathError struct
pkg os, type PathError struct, Err error
pkg os, type PathError struct, Op string
pkg os, type PathError struct, Path string
pkg syscall (darwin-amd64), const ImplementsGetwd = false
pkg syscall (darwin-amd64), func Fchflags(string, int) error
pkg syscall (darwin-amd64-cgo), const ImplementsGetwd = false
@@ -18,22 +62,72 @@ pkg syscall (freebsd-386), const ELAST = 94
pkg syscall (freebsd-386), const ImplementsGetwd = false
pkg syscall (freebsd-386), const O_CLOEXEC = 0
pkg syscall (freebsd-386), func Fchflags(string, int) error
pkg syscall (freebsd-386), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-386-cgo), const AF_MAX = 38
pkg syscall (freebsd-386-cgo), const DLT_MATCHING_MAX = 242
pkg syscall (freebsd-386-cgo), const ELAST = 94
pkg syscall (freebsd-386-cgo), const ImplementsGetwd = false
pkg syscall (freebsd-386-cgo), const O_CLOEXEC = 0
pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386-cgo), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64), const AF_MAX = 38
pkg syscall (freebsd-amd64), const DLT_MATCHING_MAX = 242
pkg syscall (freebsd-amd64), const ELAST = 94
pkg syscall (freebsd-amd64), const ImplementsGetwd = false
pkg syscall (freebsd-amd64), const O_CLOEXEC = 0
pkg syscall (freebsd-amd64), func Fchflags(string, int) error
pkg syscall (freebsd-amd64), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64-cgo), const AF_MAX = 38
pkg syscall (freebsd-amd64-cgo), const DLT_MATCHING_MAX = 242
pkg syscall (freebsd-amd64-cgo), const ELAST = 94
pkg syscall (freebsd-amd64-cgo), const ImplementsGetwd = false
pkg syscall (freebsd-amd64-cgo), const O_CLOEXEC = 0
pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm), const AF_MAX = 38
pkg syscall (freebsd-arm), const BIOCGRTIMEOUT = 1074545262
pkg syscall (freebsd-arm), const BIOCSRTIMEOUT = 2148287085
@@ -62,10 +156,22 @@ pkg syscall (freebsd-arm), const SizeofSockaddrDatalink = 56
pkg syscall (freebsd-arm), const SizeofSockaddrUnix = 108
pkg syscall (freebsd-arm), const TIOCTIMESTAMP = 1074558041
pkg syscall (freebsd-arm), func Fchflags(string, int) error
pkg syscall (freebsd-arm), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm), type BpfHdr struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm), type RawSockaddrDatalink struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm), type RawSockaddrUnix struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm), type Stat_t struct, Pad_cgo_0 [4]uint8
pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm-cgo), const AF_MAX = 38
pkg syscall (freebsd-arm-cgo), const BIOCGRTIMEOUT = 1074545262
pkg syscall (freebsd-arm-cgo), const BIOCSRTIMEOUT = 2148287085
@@ -94,10 +200,22 @@ pkg syscall (freebsd-arm-cgo), const SizeofSockaddrDatalink = 56
pkg syscall (freebsd-arm-cgo), const SizeofSockaddrUnix = 108
pkg syscall (freebsd-arm-cgo), const TIOCTIMESTAMP = 1074558041
pkg syscall (freebsd-arm-cgo), func Fchflags(string, int) error
pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm-cgo), type BpfHdr struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm-cgo), type RawSockaddrDatalink struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm-cgo), type RawSockaddrUnix struct, Pad_cgo_0 [2]uint8
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Pad_cgo_0 [4]uint8
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (linux-386), type Cmsghdr struct, X__cmsg_data [0]uint8
pkg syscall (linux-386-cgo), type Cmsghdr struct, X__cmsg_data [0]uint8
pkg syscall (linux-amd64), type Cmsghdr struct, X__cmsg_data [0]uint8
@@ -109,10 +227,10 @@ pkg syscall (netbsd-386-cgo), const ImplementsGetwd = false
pkg syscall (netbsd-amd64), const ImplementsGetwd = false
pkg syscall (netbsd-amd64-cgo), const ImplementsGetwd = false
pkg syscall (netbsd-arm), const ImplementsGetwd = false
pkg syscall (netbsd-arm-cgo), const ImplementsGetwd = false
pkg syscall (netbsd-arm), const SizeofIfData = 132
pkg syscall (netbsd-arm), func Fchflags(string, int) error
pkg syscall (netbsd-arm), type IfMsghdr struct, Pad_cgo_1 [4]uint8
pkg syscall (netbsd-arm-cgo), const ImplementsGetwd = false
pkg syscall (netbsd-arm-cgo), const SizeofIfData = 132
pkg syscall (netbsd-arm-cgo), func Fchflags(string, int) error
pkg syscall (netbsd-arm-cgo), type IfMsghdr struct, Pad_cgo_1 [4]uint8
@@ -140,6 +258,7 @@ pkg syscall (openbsd-386), const SYS_GETITIMER = 86
pkg syscall (openbsd-386), const SYS_GETRUSAGE = 117
pkg syscall (openbsd-386), const SYS_GETTIMEOFDAY = 116
pkg syscall (openbsd-386), const SYS_KEVENT = 270
pkg syscall (openbsd-386), const SYS_KILL = 37
pkg syscall (openbsd-386), const SYS_LSTAT = 293
pkg syscall (openbsd-386), const SYS_NANOSLEEP = 240
pkg syscall (openbsd-386), const SYS_SELECT = 93
@@ -193,6 +312,7 @@ pkg syscall (openbsd-386-cgo), const SYS_GETITIMER = 86
pkg syscall (openbsd-386-cgo), const SYS_GETRUSAGE = 117
pkg syscall (openbsd-386-cgo), const SYS_GETTIMEOFDAY = 116
pkg syscall (openbsd-386-cgo), const SYS_KEVENT = 270
pkg syscall (openbsd-386-cgo), const SYS_KILL = 37
pkg syscall (openbsd-386-cgo), const SYS_LSTAT = 293
pkg syscall (openbsd-386-cgo), const SYS_NANOSLEEP = 240
pkg syscall (openbsd-386-cgo), const SYS_SELECT = 93
@@ -257,6 +377,7 @@ pkg syscall (openbsd-amd64), const SYS_GETITIMER = 86
pkg syscall (openbsd-amd64), const SYS_GETRUSAGE = 117
pkg syscall (openbsd-amd64), const SYS_GETTIMEOFDAY = 116
pkg syscall (openbsd-amd64), const SYS_KEVENT = 270
pkg syscall (openbsd-amd64), const SYS_KILL = 37
pkg syscall (openbsd-amd64), const SYS_LSTAT = 293
pkg syscall (openbsd-amd64), const SYS_NANOSLEEP = 240
pkg syscall (openbsd-amd64), const SYS_SELECT = 93
@@ -320,6 +441,7 @@ pkg syscall (openbsd-amd64-cgo), const SYS_GETITIMER = 86
pkg syscall (openbsd-amd64-cgo), const SYS_GETRUSAGE = 117
pkg syscall (openbsd-amd64-cgo), const SYS_GETTIMEOFDAY = 116
pkg syscall (openbsd-amd64-cgo), const SYS_KEVENT = 270
pkg syscall (openbsd-amd64-cgo), const SYS_KILL = 37
pkg syscall (openbsd-amd64-cgo), const SYS_LSTAT = 293
pkg syscall (openbsd-amd64-cgo), const SYS_NANOSLEEP = 240
pkg syscall (openbsd-amd64-cgo), const SYS_SELECT = 93
@@ -348,19 +470,6 @@ pkg syscall (openbsd-amd64-cgo), type Statfs_t struct, F_spare [3]uint32
pkg syscall (openbsd-amd64-cgo), type Statfs_t struct, Pad_cgo_1 [4]uint8
pkg syscall (openbsd-amd64-cgo), type Timespec struct, Pad_cgo_0 [4]uint8
pkg syscall (openbsd-amd64-cgo), type Timespec struct, Sec int32
pkg testing, func RegisterCover(Cover)
pkg testing, func MainStart(func(string, string) (bool, error), []InternalTest, []InternalBenchmark, []InternalExample) *M
pkg text/template/parse, type DotNode bool
pkg text/template/parse, type Node interface { Copy, String, Type }
pkg unicode, const Version = "6.2.0"
pkg unicode, const Version = "6.3.0"
pkg unicode, const Version = "7.0.0"
pkg unicode, const Version = "8.0.0"
pkg syscall (openbsd-386), const SYS_KILL = 37
pkg syscall (openbsd-386-cgo), const SYS_KILL = 37
pkg syscall (openbsd-amd64), const SYS_KILL = 37
pkg syscall (openbsd-amd64-cgo), const SYS_KILL = 37
pkg unicode, const Version = "9.0.0"
pkg syscall (windows-386), const TOKEN_ALL_ACCESS = 983295
pkg syscall (windows-386), type AddrinfoW struct, Addr uintptr
pkg syscall (windows-386), type CertChainPolicyPara struct, ExtraPolicyPara uintptr
@@ -379,81 +488,16 @@ pkg syscall (windows-amd64), type CertRevocationInfo struct, CrlInfo uintptr
pkg syscall (windows-amd64), type CertRevocationInfo struct, OidSpecificInfo uintptr
pkg syscall (windows-amd64), type CertSimpleChain struct, TrustListInfo uintptr
pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [96]int8
pkg syscall (freebsd-386), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386-cgo), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8
pkg testing, func MainStart(func(string, string) (bool, error), []InternalTest, []InternalBenchmark, []InternalExample) *M
pkg testing, func RegisterCover(Cover)
pkg text/scanner, const GoTokens = 1012
pkg text/template/parse, type DotNode bool
pkg text/template/parse, type Node interface { Copy, String, Type }
pkg unicode, const Version = "10.0.0"
pkg unicode, const Version = "11.0.0"
pkg unicode, const Version = "12.0.0"
pkg unicode, const Version = "6.2.0"
pkg unicode, const Version = "6.3.0"
pkg unicode, const Version = "7.0.0"
pkg unicode, const Version = "8.0.0"
pkg unicode, const Version = "9.0.0"


@@ -1,8 +1,436 @@
pkg unicode, const Version = "13.0.0"
pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable
pkg archive/zip, method (*ReadCloser) Open(string) (fs.File, error)
pkg archive/zip, method (*Reader) Open(string) (fs.File, error)
pkg debug/elf, const DT_ADDRRNGHI = 1879047935
pkg debug/elf, const DT_ADDRRNGHI DynTag
pkg debug/elf, const DT_ADDRRNGLO = 1879047680
pkg debug/elf, const DT_ADDRRNGLO DynTag
pkg debug/elf, const DT_AUDIT = 1879047932
pkg debug/elf, const DT_AUDIT DynTag
pkg debug/elf, const DT_AUXILIARY = 2147483645
pkg debug/elf, const DT_AUXILIARY DynTag
pkg debug/elf, const DT_CHECKSUM = 1879047672
pkg debug/elf, const DT_CHECKSUM DynTag
pkg debug/elf, const DT_CONFIG = 1879047930
pkg debug/elf, const DT_CONFIG DynTag
pkg debug/elf, const DT_DEPAUDIT = 1879047931
pkg debug/elf, const DT_DEPAUDIT DynTag
pkg debug/elf, const DT_FEATURE = 1879047676
pkg debug/elf, const DT_FEATURE DynTag
pkg debug/elf, const DT_FILTER = 2147483647
pkg debug/elf, const DT_FILTER DynTag
pkg debug/elf, const DT_FLAGS_1 = 1879048187
pkg debug/elf, const DT_FLAGS_1 DynTag
pkg debug/elf, const DT_GNU_CONFLICT = 1879047928
pkg debug/elf, const DT_GNU_CONFLICT DynTag
pkg debug/elf, const DT_GNU_CONFLICTSZ = 1879047670
pkg debug/elf, const DT_GNU_CONFLICTSZ DynTag
pkg debug/elf, const DT_GNU_HASH = 1879047925
pkg debug/elf, const DT_GNU_HASH DynTag
pkg debug/elf, const DT_GNU_LIBLIST = 1879047929
pkg debug/elf, const DT_GNU_LIBLIST DynTag
pkg debug/elf, const DT_GNU_LIBLISTSZ = 1879047671
pkg debug/elf, const DT_GNU_LIBLISTSZ DynTag
pkg debug/elf, const DT_GNU_PRELINKED = 1879047669
pkg debug/elf, const DT_GNU_PRELINKED DynTag
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC = 1879048241
pkg debug/elf, const DT_MIPS_AUX_DYNAMIC DynTag
pkg debug/elf, const DT_MIPS_BASE_ADDRESS = 1879048198
pkg debug/elf, const DT_MIPS_BASE_ADDRESS DynTag
pkg debug/elf, const DT_MIPS_COMPACT_SIZE = 1879048239
pkg debug/elf, const DT_MIPS_COMPACT_SIZE DynTag
pkg debug/elf, const DT_MIPS_CONFLICT = 1879048200
pkg debug/elf, const DT_MIPS_CONFLICT DynTag
pkg debug/elf, const DT_MIPS_CONFLICTNO = 1879048203
pkg debug/elf, const DT_MIPS_CONFLICTNO DynTag
pkg debug/elf, const DT_MIPS_CXX_FLAGS = 1879048226
pkg debug/elf, const DT_MIPS_CXX_FLAGS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS = 1879048215
pkg debug/elf, const DT_MIPS_DELTA_CLASS DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM = 1879048224
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO = 1879048225
pkg debug/elf, const DT_MIPS_DELTA_CLASSSYM_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO = 1879048216
pkg debug/elf, const DT_MIPS_DELTA_CLASS_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE = 1879048217
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE DynTag
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO = 1879048218
pkg debug/elf, const DT_MIPS_DELTA_INSTANCE_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC = 1879048219
pkg debug/elf, const DT_MIPS_DELTA_RELOC DynTag
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO = 1879048220
pkg debug/elf, const DT_MIPS_DELTA_RELOC_NO DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM = 1879048221
pkg debug/elf, const DT_MIPS_DELTA_SYM DynTag
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO = 1879048222
pkg debug/elf, const DT_MIPS_DELTA_SYM_NO DynTag
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN = 1879048235
pkg debug/elf, const DT_MIPS_DYNSTR_ALIGN DynTag
pkg debug/elf, const DT_MIPS_FLAGS = 1879048197
pkg debug/elf, const DT_MIPS_FLAGS DynTag
pkg debug/elf, const DT_MIPS_GOTSYM = 1879048211
pkg debug/elf, const DT_MIPS_GOTSYM DynTag
pkg debug/elf, const DT_MIPS_GP_VALUE = 1879048240
pkg debug/elf, const DT_MIPS_GP_VALUE DynTag
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX = 1879048231
pkg debug/elf, const DT_MIPS_HIDDEN_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_HIPAGENO = 1879048212
pkg debug/elf, const DT_MIPS_HIPAGENO DynTag
pkg debug/elf, const DT_MIPS_ICHECKSUM = 1879048195
pkg debug/elf, const DT_MIPS_ICHECKSUM DynTag
pkg debug/elf, const DT_MIPS_INTERFACE = 1879048234
pkg debug/elf, const DT_MIPS_INTERFACE DynTag
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE = 1879048236
pkg debug/elf, const DT_MIPS_INTERFACE_SIZE DynTag
pkg debug/elf, const DT_MIPS_IVERSION = 1879048196
pkg debug/elf, const DT_MIPS_IVERSION DynTag
pkg debug/elf, const DT_MIPS_LIBLIST = 1879048201
pkg debug/elf, const DT_MIPS_LIBLIST DynTag
pkg debug/elf, const DT_MIPS_LIBLISTNO = 1879048208
pkg debug/elf, const DT_MIPS_LIBLISTNO DynTag
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX = 1879048229
pkg debug/elf, const DT_MIPS_LOCALPAGE_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX = 1879048230
pkg debug/elf, const DT_MIPS_LOCAL_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO = 1879048202
pkg debug/elf, const DT_MIPS_LOCAL_GOTNO DynTag
pkg debug/elf, const DT_MIPS_MSYM = 1879048199
pkg debug/elf, const DT_MIPS_MSYM DynTag
pkg debug/elf, const DT_MIPS_OPTIONS = 1879048233
pkg debug/elf, const DT_MIPS_OPTIONS DynTag
pkg debug/elf, const DT_MIPS_PERF_SUFFIX = 1879048238
pkg debug/elf, const DT_MIPS_PERF_SUFFIX DynTag
pkg debug/elf, const DT_MIPS_PIXIE_INIT = 1879048227
pkg debug/elf, const DT_MIPS_PIXIE_INIT DynTag
pkg debug/elf, const DT_MIPS_PLTGOT = 1879048242
pkg debug/elf, const DT_MIPS_PLTGOT DynTag
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX = 1879048232
pkg debug/elf, const DT_MIPS_PROTECTED_GOTIDX DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP = 1879048214
pkg debug/elf, const DT_MIPS_RLD_MAP DynTag
pkg debug/elf, const DT_MIPS_RLD_MAP_REL = 1879048245
pkg debug/elf, const DT_MIPS_RLD_MAP_REL DynTag
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 1879048237
pkg debug/elf, const DT_MIPS_RLD_TEXT_RESOLVE_ADDR DynTag
pkg debug/elf, const DT_MIPS_RLD_VERSION = 1879048193
pkg debug/elf, const DT_MIPS_RLD_VERSION DynTag
pkg debug/elf, const DT_MIPS_RWPLT = 1879048244
pkg debug/elf, const DT_MIPS_RWPLT DynTag
pkg debug/elf, const DT_MIPS_SYMBOL_LIB = 1879048228
pkg debug/elf, const DT_MIPS_SYMBOL_LIB DynTag
pkg debug/elf, const DT_MIPS_SYMTABNO = 1879048209
pkg debug/elf, const DT_MIPS_SYMTABNO DynTag
pkg debug/elf, const DT_MIPS_TIME_STAMP = 1879048194
pkg debug/elf, const DT_MIPS_TIME_STAMP DynTag
pkg debug/elf, const DT_MIPS_UNREFEXTNO = 1879048210
pkg debug/elf, const DT_MIPS_UNREFEXTNO DynTag
pkg debug/elf, const DT_MOVEENT = 1879047674
pkg debug/elf, const DT_MOVEENT DynTag
pkg debug/elf, const DT_MOVESZ = 1879047675
pkg debug/elf, const DT_MOVESZ DynTag
pkg debug/elf, const DT_MOVETAB = 1879047934
pkg debug/elf, const DT_MOVETAB DynTag
pkg debug/elf, const DT_PLTPAD = 1879047933
pkg debug/elf, const DT_PLTPAD DynTag
pkg debug/elf, const DT_PLTPADSZ = 1879047673
pkg debug/elf, const DT_PLTPADSZ DynTag
pkg debug/elf, const DT_POSFLAG_1 = 1879047677
pkg debug/elf, const DT_POSFLAG_1 DynTag
pkg debug/elf, const DT_PPC64_GLINK = 1879048192
pkg debug/elf, const DT_PPC64_GLINK DynTag
pkg debug/elf, const DT_PPC64_OPD = 1879048193
pkg debug/elf, const DT_PPC64_OPD DynTag
pkg debug/elf, const DT_PPC64_OPDSZ = 1879048194
pkg debug/elf, const DT_PPC64_OPDSZ DynTag
pkg debug/elf, const DT_PPC64_OPT = 1879048195
pkg debug/elf, const DT_PPC64_OPT DynTag
pkg debug/elf, const DT_PPC_GOT = 1879048192
pkg debug/elf, const DT_PPC_GOT DynTag
pkg debug/elf, const DT_PPC_OPT = 1879048193
pkg debug/elf, const DT_PPC_OPT DynTag
pkg debug/elf, const DT_RELACOUNT = 1879048185
pkg debug/elf, const DT_RELACOUNT DynTag
pkg debug/elf, const DT_RELCOUNT = 1879048186
pkg debug/elf, const DT_RELCOUNT DynTag
pkg debug/elf, const DT_SPARC_REGISTER = 1879048193
pkg debug/elf, const DT_SPARC_REGISTER DynTag
pkg debug/elf, const DT_SYMINENT = 1879047679
pkg debug/elf, const DT_SYMINENT DynTag
pkg debug/elf, const DT_SYMINFO = 1879047935
pkg debug/elf, const DT_SYMINFO DynTag
pkg debug/elf, const DT_SYMINSZ = 1879047678
pkg debug/elf, const DT_SYMINSZ DynTag
pkg debug/elf, const DT_SYMTAB_SHNDX = 34
pkg debug/elf, const DT_SYMTAB_SHNDX DynTag
pkg debug/elf, const DT_TLSDESC_GOT = 1879047927
pkg debug/elf, const DT_TLSDESC_GOT DynTag
pkg debug/elf, const DT_TLSDESC_PLT = 1879047926
pkg debug/elf, const DT_TLSDESC_PLT DynTag
pkg debug/elf, const DT_USED = 2147483646
pkg debug/elf, const DT_USED DynTag
pkg debug/elf, const DT_VALRNGHI = 1879047679
pkg debug/elf, const DT_VALRNGHI DynTag
pkg debug/elf, const DT_VALRNGLO = 1879047424
pkg debug/elf, const DT_VALRNGLO DynTag
pkg debug/elf, const DT_VERDEF = 1879048188
pkg debug/elf, const DT_VERDEF DynTag
pkg debug/elf, const DT_VERDEFNUM = 1879048189
pkg debug/elf, const DT_VERDEFNUM DynTag
pkg debug/elf, const PT_AARCH64_ARCHEXT = 1879048192
pkg debug/elf, const PT_AARCH64_ARCHEXT ProgType
pkg debug/elf, const PT_AARCH64_UNWIND = 1879048193
pkg debug/elf, const PT_AARCH64_UNWIND ProgType
pkg debug/elf, const PT_ARM_ARCHEXT = 1879048192
pkg debug/elf, const PT_ARM_ARCHEXT ProgType
pkg debug/elf, const PT_ARM_EXIDX = 1879048193
pkg debug/elf, const PT_ARM_EXIDX ProgType
pkg debug/elf, const PT_GNU_EH_FRAME = 1685382480
pkg debug/elf, const PT_GNU_EH_FRAME ProgType
pkg debug/elf, const PT_GNU_MBIND_HI = 1685386580
pkg debug/elf, const PT_GNU_MBIND_HI ProgType
pkg debug/elf, const PT_GNU_MBIND_LO = 1685382485
pkg debug/elf, const PT_GNU_MBIND_LO ProgType
pkg debug/elf, const PT_GNU_PROPERTY = 1685382483
pkg debug/elf, const PT_GNU_PROPERTY ProgType
pkg debug/elf, const PT_GNU_RELRO = 1685382482
pkg debug/elf, const PT_GNU_RELRO ProgType
pkg debug/elf, const PT_GNU_STACK = 1685382481
pkg debug/elf, const PT_GNU_STACK ProgType
pkg debug/elf, const PT_MIPS_ABIFLAGS = 1879048195
pkg debug/elf, const PT_MIPS_ABIFLAGS ProgType
pkg debug/elf, const PT_MIPS_OPTIONS = 1879048194
pkg debug/elf, const PT_MIPS_OPTIONS ProgType
pkg debug/elf, const PT_MIPS_REGINFO = 1879048192
pkg debug/elf, const PT_MIPS_REGINFO ProgType
pkg debug/elf, const PT_MIPS_RTPROC = 1879048193
pkg debug/elf, const PT_MIPS_RTPROC ProgType
pkg debug/elf, const PT_OPENBSD_BOOTDATA = 1705253862
pkg debug/elf, const PT_OPENBSD_BOOTDATA ProgType
pkg debug/elf, const PT_OPENBSD_RANDOMIZE = 1705237478
pkg debug/elf, const PT_OPENBSD_RANDOMIZE ProgType
pkg debug/elf, const PT_OPENBSD_WXNEEDED = 1705237479
pkg debug/elf, const PT_OPENBSD_WXNEEDED ProgType
pkg debug/elf, const PT_PAX_FLAGS = 1694766464
pkg debug/elf, const PT_PAX_FLAGS ProgType
pkg debug/elf, const PT_S390_PGSTE = 1879048192
pkg debug/elf, const PT_S390_PGSTE ProgType
pkg debug/elf, const PT_SUNWSTACK = 1879048187
pkg debug/elf, const PT_SUNWSTACK ProgType
pkg debug/elf, const PT_SUNW_EH_FRAME = 1685382480
pkg debug/elf, const PT_SUNW_EH_FRAME ProgType
pkg embed, method (FS) Open(string) (fs.File, error)
pkg embed, method (FS) ReadDir(string) ([]fs.DirEntry, error)
pkg embed, method (FS) ReadFile(string) ([]uint8, error)
pkg embed, type FS struct
pkg flag, func Func(string, string, func(string) error)
pkg flag, method (*FlagSet) Func(string, string, func(string) error)
pkg go/build, type Package struct, EmbedPatterns []string
pkg go/build, type Package struct, IgnoredOtherFiles []string
pkg go/build, type Package struct, TestEmbedPatterns []string
pkg go/build, type Package struct, XTestEmbedPatterns []string
pkg html/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg html/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg io, func NopCloser(Reader) ReadCloser
pkg io, func ReadAll(Reader) ([]uint8, error)
pkg io, type ReadSeekCloser interface { Close, Read, Seek }
pkg io, type ReadSeekCloser interface, Close() error
pkg io, type ReadSeekCloser interface, Read([]uint8) (int, error)
pkg io, type ReadSeekCloser interface, Seek(int64, int) (int64, error)
pkg io, var Discard Writer
pkg io/fs, const ModeAppend = 1073741824
pkg io/fs, const ModeAppend FileMode
pkg io/fs, const ModeCharDevice = 2097152
pkg io/fs, const ModeCharDevice FileMode
pkg io/fs, const ModeDevice = 67108864
pkg io/fs, const ModeDevice FileMode
pkg io/fs, const ModeDir = 2147483648
pkg io/fs, const ModeDir FileMode
pkg io/fs, const ModeExclusive = 536870912
pkg io/fs, const ModeExclusive FileMode
pkg io/fs, const ModeIrregular = 524288
pkg io/fs, const ModeIrregular FileMode
pkg io/fs, const ModeNamedPipe = 33554432
pkg io/fs, const ModeNamedPipe FileMode
pkg io/fs, const ModePerm = 511
pkg io/fs, const ModePerm FileMode
pkg io/fs, const ModeSetgid = 4194304
pkg io/fs, const ModeSetgid FileMode
pkg io/fs, const ModeSetuid = 8388608
pkg io/fs, const ModeSetuid FileMode
pkg io/fs, const ModeSocket = 16777216
pkg io/fs, const ModeSocket FileMode
pkg io/fs, const ModeSticky = 1048576
pkg io/fs, const ModeSticky FileMode
pkg io/fs, const ModeSymlink = 134217728
pkg io/fs, const ModeSymlink FileMode
pkg io/fs, const ModeTemporary = 268435456
pkg io/fs, const ModeTemporary FileMode
pkg io/fs, const ModeType = 2401763328
pkg io/fs, const ModeType FileMode
pkg io/fs, func Glob(FS, string) ([]string, error)
pkg io/fs, func ReadDir(FS, string) ([]DirEntry, error)
pkg io/fs, func ReadFile(FS, string) ([]uint8, error)
pkg io/fs, func Stat(FS, string) (FileInfo, error)
pkg io/fs, func ValidPath(string) bool
pkg io/fs, method (*PathError) Error() string
pkg io/fs, method (*PathError) Timeout() bool
pkg io/fs, method (*PathError) Unwrap() error
pkg io/fs, method (FileMode) IsDir() bool
pkg io/fs, method (FileMode) IsRegular() bool
pkg io/fs, method (FileMode) Perm() FileMode
pkg io/fs, method (FileMode) String() string
pkg io/fs, method (FileMode) Type() FileMode
pkg io/fs, type DirEntry interface { Info, IsDir, Name, Type }
pkg io/fs, type DirEntry interface, Info() (FileInfo, error)
pkg io/fs, type DirEntry interface, IsDir() bool
pkg io/fs, type DirEntry interface, Name() string
pkg io/fs, type DirEntry interface, Type() FileMode
pkg io/fs, type FS interface { Open }
pkg io/fs, type FS interface, Open(string) (File, error)
pkg io/fs, type File interface { Close, Read, Stat }
pkg io/fs, type File interface, Close() error
pkg io/fs, type File interface, Read([]uint8) (int, error)
pkg io/fs, type File interface, Stat() (FileInfo, error)
pkg io/fs, type FileInfo interface { IsDir, ModTime, Mode, Name, Size, Sys }
pkg io/fs, type FileInfo interface, IsDir() bool
pkg io/fs, type FileInfo interface, ModTime() time.Time
pkg io/fs, type FileInfo interface, Mode() FileMode
pkg io/fs, type FileInfo interface, Name() string
pkg io/fs, type FileInfo interface, Size() int64
pkg io/fs, type FileInfo interface, Sys() interface{}
pkg io/fs, type FileMode uint32
pkg io/fs, type GlobFS interface { Glob, Open }
pkg io/fs, type GlobFS interface, Glob(string) ([]string, error)
pkg io/fs, type GlobFS interface, Open(string) (File, error)
pkg io/fs, type PathError struct
pkg io/fs, type PathError struct, Err error
pkg io/fs, type PathError struct, Op string
pkg io/fs, type PathError struct, Path string
pkg io/fs, type ReadDirFS interface { Open, ReadDir }
pkg io/fs, type ReadDirFS interface, Open(string) (File, error)
pkg io/fs, type ReadDirFS interface, ReadDir(string) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface { Close, Read, ReadDir, Stat }
pkg io/fs, type ReadDirFile interface, Close() error
pkg io/fs, type ReadDirFile interface, Read([]uint8) (int, error)
pkg io/fs, type ReadDirFile interface, ReadDir(int) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface, Stat() (FileInfo, error)
pkg io/fs, type ReadFileFS interface { Open, ReadFile }
pkg io/fs, type ReadFileFS interface, Open(string) (File, error)
pkg io/fs, type ReadFileFS interface, ReadFile(string) ([]uint8, error)
pkg io/fs, type StatFS interface { Open, Stat }
pkg io/fs, type StatFS interface, Open(string) (File, error)
pkg io/fs, type StatFS interface, Stat(string) (FileInfo, error)
pkg io/fs, var ErrClosed error
pkg io/fs, var ErrExist error
pkg io/fs, var ErrInvalid error
pkg io/fs, var ErrNotExist error
pkg io/fs, var ErrPermission error
pkg log, func Default() *Logger
pkg net, var ErrClosed error
pkg net/http, func FS(fs.FS) FileSystem
pkg net/http, type Transport struct, GetProxyConnectHeader func(context.Context, *url.URL, string) (Header, error)
pkg os, const ModeAppend fs.FileMode
pkg os, const ModeCharDevice fs.FileMode
pkg os, const ModeDevice fs.FileMode
pkg os, const ModeDir fs.FileMode
pkg os, const ModeExclusive fs.FileMode
pkg os, const ModeIrregular fs.FileMode
pkg os, const ModeNamedPipe fs.FileMode
pkg os, const ModePerm fs.FileMode
pkg os, const ModeSetgid fs.FileMode
pkg os, const ModeSetuid fs.FileMode
pkg os, const ModeSocket fs.FileMode
pkg os, const ModeSticky fs.FileMode
pkg os, const ModeSymlink fs.FileMode
pkg os, const ModeTemporary fs.FileMode
pkg os, const ModeType fs.FileMode
pkg os, func Chmod(string, fs.FileMode) error
pkg os, func DirFS(string) fs.FS
pkg os, func Lstat(string) (fs.FileInfo, error)
pkg os, func Mkdir(string, fs.FileMode) error
pkg os, func MkdirAll(string, fs.FileMode) error
pkg os, func OpenFile(string, int, fs.FileMode) (*File, error)
pkg os, func SameFile(fs.FileInfo, fs.FileInfo) bool
pkg os, func Stat(string) (fs.FileInfo, error)
pkg os, method (*File) Chmod(fs.FileMode) error
pkg os, method (*File) ReadDir(int) ([]fs.DirEntry, error)
pkg os, method (*File) Readdir(int) ([]fs.FileInfo, error)
pkg os, method (*File) Stat() (fs.FileInfo, error)
pkg os, type DirEntry = fs.DirEntry
pkg os, type FileInfo = fs.FileInfo
pkg os, type FileMode = fs.FileMode
pkg os, type PathError = fs.PathError
pkg os/signal, func NotifyContext(context.Context, ...os.Signal) (context.Context, context.CancelFunc)
pkg runtime/metrics, const KindBad = 0
pkg runtime/metrics, const KindBad ValueKind
pkg runtime/metrics, const KindFloat64 = 2
pkg runtime/metrics, const KindFloat64 ValueKind
pkg runtime/metrics, const KindFloat64Histogram = 3
pkg runtime/metrics, const KindFloat64Histogram ValueKind
pkg runtime/metrics, const KindUint64 = 1
pkg runtime/metrics, const KindUint64 ValueKind
pkg runtime/metrics, func All() []Description
pkg runtime/metrics, func Read([]Sample)
pkg runtime/metrics, method (Value) Float64() float64
pkg runtime/metrics, method (Value) Float64Histogram() *Float64Histogram
pkg runtime/metrics, method (Value) Kind() ValueKind
pkg runtime/metrics, method (Value) Uint64() uint64
pkg runtime/metrics, type Description struct
pkg runtime/metrics, type Description struct, Cumulative bool
pkg runtime/metrics, type Description struct, Description string
pkg runtime/metrics, type Description struct, Kind ValueKind
pkg runtime/metrics, type Description struct, Name string
pkg runtime/metrics, type Description struct, StopTheWorld bool
pkg runtime/metrics, type Float64Histogram struct
pkg runtime/metrics, type Float64Histogram struct, Buckets []float64
pkg runtime/metrics, type Float64Histogram struct, Counts []uint64
pkg runtime/metrics, type Sample struct
pkg runtime/metrics, type Sample struct, Name string
pkg runtime/metrics, type Sample struct, Value Value
pkg runtime/metrics, type Value struct
pkg runtime/metrics, type ValueKind int
pkg syscall (linux-386), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func Setegid(int) error
pkg syscall (linux-386), func Seteuid(int) error
pkg syscall (linux-386-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func Setegid(int) error
pkg syscall (linux-386-cgo), func Seteuid(int) error
pkg syscall (linux-amd64), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func Setegid(int) error
pkg syscall (linux-amd64), func Seteuid(int) error
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func Setegid(int) error
pkg syscall (linux-amd64-cgo), func Seteuid(int) error
pkg syscall (linux-arm), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func Setegid(int) error
pkg syscall (linux-arm), func Seteuid(int) error
pkg syscall (linux-arm-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func Setegid(int) error
pkg syscall (linux-arm-cgo), func Seteuid(int) error
pkg syscall (windows-386), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-amd64), func RtlGenRandom(*uint8, uint32) error
pkg testing/fstest, func TestFS(fs.FS, ...string) error
pkg testing/fstest, method (MapFS) Glob(string) ([]string, error)
pkg testing/fstest, method (MapFS) Open(string) (fs.File, error)
pkg testing/fstest, method (MapFS) ReadDir(string) ([]fs.DirEntry, error)
pkg testing/fstest, method (MapFS) ReadFile(string) ([]uint8, error)
pkg testing/fstest, method (MapFS) Stat(string) (fs.FileInfo, error)
pkg testing/fstest, type MapFS map[string]*MapFile
pkg testing/fstest, type MapFile struct
pkg testing/fstest, type MapFile struct, Data []uint8
pkg testing/fstest, type MapFile struct, ModTime time.Time
pkg testing/fstest, type MapFile struct, Mode fs.FileMode
pkg testing/fstest, type MapFile struct, Sys interface{}
pkg testing/iotest, func ErrReader(error) io.Reader
pkg testing/iotest, func TestReader(io.Reader, []uint8) error
pkg text/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
@@ -17,3 +445,8 @@ pkg text/template/parse, type CommentNode struct, embedded NodeType
pkg text/template/parse, type CommentNode struct, embedded Pos
pkg text/template/parse, type Mode uint
pkg text/template/parse, type Tree struct, Mode Mode
pkg unicode, const Version = "13.0.0"
pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable
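The embed, io/fs, and os entries above are easiest to read together: embed.FS and the value returned by os.DirFS both satisfy the new fs.FS interface, so the io/fs helpers apply to either. A minimal sketch of how they combine (hello.txt and the use of the current directory are illustrative assumptions, not taken from the listing):

package main

import (
	"embed"
	"fmt"
	"io/fs"
	"os"
)

// hello.txt is an assumed example file that must exist next to this source
// file at build time; //go:embed bakes it into the binary as an fs.FS.
//go:embed hello.txt
var content embed.FS

func main() {
	// embed.FS implements fs.FS, so the new io/fs helpers work on it directly.
	data, err := fs.ReadFile(content, "hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("embedded: %s\n", data)

	// os.DirFS exposes an ordinary directory through the same fs.FS interface.
	dir := os.DirFS(".")
	entries, err := fs.ReadDir(dir, ".")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}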
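The runtime/metrics entries above describe a sampling API: build a slice of Sample values naming the metrics you want, call Read, then switch on each Value's Kind. A small sketch, assuming one plausible metric name (real names are discovered with metrics.All()):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Metric names are discovered at run time via metrics.All(); the name
	// below is only an illustrative guess at a heap metric.
	const name = "/memory/classes/heap/objects:bytes"
	samples := []metrics.Sample{{Name: name}}
	metrics.Read(samples)

	switch samples[0].Value.Kind() {
	case metrics.KindUint64:
		fmt.Println(name, "=", samples[0].Value.Uint64(), "bytes")
	case metrics.KindBad:
		fmt.Println("metric not supported by this runtime:", name)
	}
}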


@@ -947,10 +947,18 @@ The Gerrit voting system involves an integer in the range -2 to +2:
</li>
</ul>
<p>
At least two maintainers must approve of the change, and at least one
of those maintainers must +2 the change.
The second maintainer may cast a vote of Trust+1, meaning that the
change looks basically OK, but that the maintainer hasn't done the
detailed review required for a +2 vote.
</p>
<h3 id="submit">Submitting an approved change</h3>
<p>
After the code has been +2'ed, an approver will
After the code has been +2'ed and Trust+1'ed, an approver will
apply it to the master branch using the Gerrit user interface.
This is called "submitting the change".
</p>


@@ -71,6 +71,16 @@ Do not send CLs removing the interior tags from such phrases.
TODO: write and link to blog post
</p>
<p><!-- golang.org/issue/40276 -->
<code>go</code> <code>install</code>, with or without a version suffix (as
described above), is now the recommended way to build and install packages in
module mode. <code>go</code> <code>get</code> should be used with the
<code>-d</code> flag to adjust the current module's dependencies without
building packages, and use of <code>go</code> <code>get</code> to build and
install packages is deprecated. In a future release, the <code>-d</code> flag
will always be enabled.
</p>
<p><!-- golang.org/issue/24031 -->
<code>retract</code> directives may now be used in a <code>go.mod</code> file
to indicate that certain published versions of the module should not be used
@@ -110,6 +120,16 @@ Do not send CLs removing the interior tags from such phrases.
See <code>go</code> <code>help</code> <code>environment</code> for details.
</p>
<h4 id="go-get"><code>go</code> <code>get</code></h4>
<p><!-- golang.org/cl/263267 -->
<code>go</code> <code>get</code> <code>example.com/mod@patch</code> now
requires that some version of <code>example.com/mod</code> already be
required by the main module.
(However, <code>go</code> <code>get</code> <code>-u=patch</code> continues
to patch even newly-added dependencies.)
</p>
<h4 id="all-pattern">The <code>all</code> pattern</h4>
<p><!-- golang.org/cl/240623 -->
@@ -122,6 +142,37 @@ Do not send CLs removing the interior tags from such phrases.
by <code>go</code> <code>mod</code> <code>vendor</code> since Go 1.11.
</p>
<h4 id="toolexec">The <code>-toolexec</code> build flag</h4>
<p><!-- golang.org/cl/263357 -->
When the <code>-toolexec</code> build flag is specified to use a program when
invoking toolchain programs like compile or asm, the environment variable
<code>TOOLEXEC_IMPORTPATH</code> is now set to the import path of the package
being built.
</p>
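<p>
For illustration only, a hypothetical wrapper program (here called logtool, not part of the release) could read the variable and then re-invoke the real tool unchanged:
</p>
<pre>
// logtool is a hypothetical -toolexec wrapper: it logs the import path of the
// package being built and then runs the underlying tool unchanged.
// Assumed usage: go build -toolexec /path/to/logtool ./...
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// The go command invokes the wrapper with the real tool path as os.Args[1]
	// and the tool's own arguments after it.
	fmt.Fprintln(os.Stderr, "building", os.Getenv("TOOLEXEC_IMPORTPATH"))

	cmd := exec.Command(os.Args[1], os.Args[2:]...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
</pre>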
<h4 id="i-flag">The <code>-i</code> build flag</h4>
<p><!-- golang.org/issue/41696 -->
The <code>-i</code> flag accepted by <code>go</code> <code>build</code>,
<code>go</code> <code>install</code>, and <code>go</code> <code>test</code> is
now deprecated. The <code>-i</code> flag instructs the <code>go</code> command
to install packages imported by packages named on the command line. Since
the build cache was introduced in Go 1.10, the <code>-i</code> flag no longer
has a significant effect on build times, and it causes errors when the install
directory is not writable.
</p>
<h4 id="list-buildid">The <code>list</code> command</h4>
<p><!-- golang.org/cl/263542 -->
When the <code>-export</code> flag is specified, the <code>BuildID</code>
field is now set to the build ID of the compiled package. This is equivalent
to running <code>go</code> <code>tool</code> <code>buildid</code> on
<code>go</code> <code>list</code> <code>-export</code> <code>-f</code> <code>{{.Export}}</code>,
but without the extra step.
</p>
<h3 id="cgo">Cgo</h3>
<p> <!-- CL 252378 -->
@@ -142,6 +193,18 @@ Do not send CLs removing the interior tags from such phrases.
TODO
</p>
<p><!-- CL 267100 -->
On Linux, the runtime now defaults to releasing memory to the
operating system promptly (using <code>MADV_DONTNEED</code>), rather
than lazily when the operating system is under memory pressure
(using <code>MADV_FREE</code>). This means process-level memory
statistics like RSS will more accurately reflect the amount of
physical memory being used by Go processes. Systems that are
currently using <code>GODEBUG=madvdontneed=1</code> to improve
memory monitoring behavior no longer need to set this environment
variable.
</p>
<h2 id="compiler">Compiler</h2>
<p>
@@ -173,7 +236,10 @@ Do not send CLs removing the interior tags from such phrases.
TODO: update with final numbers later in the release.
</p>
<!-- CL 255259: https://golang.org/cl/255259: cmd/link: enable ASLR on windows binaries built with -buildmode=c-shared -->
<p> <!-- CL 255259 -->
On Windows, <code>go build -buildmode=c-shared</code> now generates Windows
ASLR DLLs by default. ASLR can be disabled with <code>--ldflags=-aslr=false</code>.
</p>
<h2 id="library">Core library</h2>
@@ -199,6 +265,27 @@ Do not send CLs removing the interior tags from such phrases.
by the <code>Error</code> method with <code>"tls: use of closed connection"</code>.
</p>
<p><!-- CL 266037 -->
A default deadline is set in <a href="/pkg/crypto/tls/#Conn.Close">Close</a>
before sending the close notify alert, in order to prevent blocking
indefinitely.
</p>
<p><!-- CL 246338 -->
<a href="/pkg/crypto/tls#Conn.HandshakeContext">(*Conn).HandshakeContext</a> was added to
allow the user to control cancellation of an in-progress TLS Handshake.
The context provided is propagated into the
<a href="/pkg/crypto/tls#ClientHelloInfo">ClientHelloInfo</a>
and <a href="/pkg/crypto/tls#CertificateRequestInfo">CertificateRequestInfo</a>
structs and accessible through the new
<a href="/pkg/crypto/tls#ClientHelloInfo.Context">(*ClientHelloInfo).Context</a>
and
<a href="/pkg/crypto/tls#CertificateRequestInfo.Context">
(*CertificateRequestInfo).Context
</a> methods respectively. Canceling the context after the handshake has finished
has no effect.
</p>
<h3 id="crypto/x509"><a href="/pkg/crypto/x509">crypto/x509</a></h3>
<p><!-- CL 235078 -->
@@ -216,6 +303,14 @@ Do not send CLs removing the interior tags from such phrases.
of a malformed certificate.
</p>
<h3 id="encoding/json"><a href="/pkg/encoding/json">encoding/json</a></h3>
<p><!-- CL 263619 -->
The error message for
<a href="/pkg/encoding/json/#SyntaxError">SyntaxError</a>
now begins with "json: ", matching the other errors in the package.
</p>
<h3 id="net"><a href="/pkg/net/">net</a></h3>
<p><!-- CL 250357 -->
@@ -228,13 +323,10 @@ Do not send CLs removing the interior tags from such phrases.
with <code>"use of closed network connection"</code>.
</p>
<h3 id="reflect"><a href="/pkg/reflect/">reflect</a></h3>
<p><!-- CL 259237, golang.org/issue/22075 -->
For interface types and values, <a href="/pkg/reflect/#Value.Method">Method</a>,
<a href="/pkg/reflect/#Value.MethodByName">MethodByName</a>, and
<a href="/pkg/reflect/#Value.NumMethod">NumMethod</a> now
operate on the interface's exported method set, rather than its full method set.
<p><!-- CL 255898 -->
In previous Go releases the default TCP listener backlog size on Linux systems,
set by <code>/proc/sys/net/core/somaxconn</code>, was limited to a maximum of <code>65535</code>.
On Linux kernel version 4.1 and above, the maximum is now <code>4294967295</code>.
</p>
<h3 id="text/template/parse"><a href="/pkg/text/template/parse/">text/template/parse</a></h3>
@@ -288,6 +380,20 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- crypto/x509 -->
<dl id="encoding/xml"><dt><a href="/pkg/encoding/xml/">encoding/xml</a></dt>
<dd>
<p><!-- CL 264024 -->
The encoder has always taken care to avoid using namespace prefixes
beginning with <code>xml</code>, which are reserved by the XML
specification.
Now, following the specification more closely, that check is
case-insensitive, so that prefixes beginning
with <code>XML</code>, <code>XmL</code>, and so on are also
avoided.
</p>
</dd>
</dl><!-- encoding/xml -->
<dl id="net/http"><dt><a href="/pkg/net/http/">net/http</a></dt>
<dd>
<p><!-- CL 233637 -->
@@ -314,6 +420,13 @@ Do not send CLs removing the interior tags from such phrases.
Cookies set with <code>SameSiteDefaultMode</code> now behave according to the current
spec (no attribute is set) instead of generating a SameSite key without a value.
</p>
<p><!-- CL 246338 -->
The <a href="/pkg/net/http/"><code>net/http</code></a> package now uses the new
<a href="/pkg/crypto/tls#Conn.HandshakeContext"><code>(*tls.Conn).HandshakeContext</code></a>
with the <a href="/pkg/net/http/#Request"><code>Request</code></a> context
when performing TLS handshakes in the client or server.
</p>
</dd>
</dl><!-- net/http -->
@@ -325,6 +438,14 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- runtime/debug -->
<dl id="syscall"><dt><a href="/pkg/syscall/">syscall</a></dt>
<dd>
<p><!-- CL 261917 -->
<a href="/pkg/syscall/#SysProcAttr"><code>SysProcAttr</code></a> on Windows has a new NoInheritHandles field that disables inheriting handles when creating a new process.
</p>
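<p>
A brief sketch of setting the new field, assuming the usual os/exec wiring (Windows only; the child program is an arbitrary example):
</p>
<pre>
// Windows-only sketch: SysProcAttr and its NoInheritHandles field are defined
// for GOOS=windows; notepad.exe is just an arbitrary child process.
package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("notepad.exe")

	attr := new(syscall.SysProcAttr)
	attr.NoInheritHandles = true // do not pass our inheritable handles to the child
	cmd.SysProcAttr = attr

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
}
</pre>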
</dd>
</dl><!-- syscall -->
<dl id="strconv"><dt><a href="/pkg/strconv/">strconv</a></dt>
<dd>
<p><!-- CL 260858 -->

View file

@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Sep 24, 2020",
"Subtitle": "Version of Oct 7, 2020",
"Path": "/ref/spec"
}-->
@@ -3594,23 +3594,33 @@ replaced by its left operand alone.
</p>
<pre>
var a [1024]byte
var s uint = 33
// The results of the following examples are given for 64-bit ints.
var i = 1&lt;&lt;s // 1 has type int
var j int32 = 1&lt;&lt;s // 1 has type int32; j == 0
var k = uint64(1&lt;&lt;s) // 1 has type uint64; k == 1&lt;&lt;33
var m int = 1.0&lt;&lt;s // 1.0 has type int; m == 0 if ints are 32bits in size
var n = 1.0&lt;&lt;s == j // 1.0 has type int32; n == true
var o = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; o == true if ints are 32bits in size
var p = 1&lt;&lt;s == 1&lt;&lt;33 // illegal if ints are 32bits in size: 1 has type int, but 1&lt;&lt;33 overflows int
var m int = 1.0&lt;&lt;s // 1.0 has type int; m == 1&lt;&lt;33
var n = 1.0&lt;&lt;s == j // 1.0 has type int; n == true
var o = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; o == false
var p = 1&lt;&lt;s == 1&lt;&lt;33 // 1 has type int; p == true
var u = 1.0&lt;&lt;s // illegal: 1.0 has type float64, cannot shift
var u1 = 1.0&lt;&lt;s != 0 // illegal: 1.0 has type float64, cannot shift
var u2 = 1&lt;&lt;s != 1.0 // illegal: 1 has type float64, cannot shift
var v float32 = 1&lt;&lt;s // illegal: 1 has type float32, cannot shift
var w int64 = 1.0&lt;&lt;33 // 1.0&lt;&lt;33 is a constant shift expression
var x = a[1.0&lt;&lt;s] // 1.0 has type int; x == a[0] if ints are 32bits in size
var a = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(a) == 0 if ints are 32bits in size
</pre>
var w int64 = 1.0&lt;&lt;33 // 1.0&lt;&lt;33 is a constant shift expression; w == 1&lt;&lt;33
var x = a[1.0&lt;&lt;s] // panics: 1.0 has type int, but 1&lt;&lt;33 overflows array bounds
var b = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(b) == 1&lt;&lt;33
// The results of the following examples are given for 32-bit ints,
// which means the shifts will overflow.
var mm int = 1.0&lt;&lt;s // 1.0 has type int; mm == 0
var oo = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; oo == true
var pp = 1&lt;&lt;s == 1&lt;&lt;33 // illegal: 1 has type int, but 1&lt;&lt;33 overflows int
var xx = a[1.0&lt;&lt;s] // 1.0 has type int; xx == a[0]
var bb = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(bb) == 0
</pre>
<h4 id="Operator_precedence">Operator precedence</h4>
<p>


@@ -8,8 +8,8 @@
# Consult https://www.iana.org/time-zones for the latest versions.
# Versions to use.
CODE=2020b
DATA=2020b
CODE=2020d
DATA=2020d
set -e
rm -rf work

Binary file not shown.


@@ -181,7 +181,7 @@ func testCallbackCallers(t *testing.T) {
name := []string{
"runtime.cgocallbackg1",
"runtime.cgocallbackg",
"runtime.cgocallback_gofunc",
"runtime.cgocallback",
"runtime.asmcgocall",
"runtime.cgocall",
"test._Cfunc_callback",


@@ -15,5 +15,6 @@ func TestSetgid(t *testing.T) {
}
testSetgid(t)
}
func Test1435(t *testing.T) { test1435(t) }
func Test6997(t *testing.T) { test6997(t) }
func TestBuildID(t *testing.T) { testBuildID(t) }


@@ -76,6 +76,8 @@ func TestCheckConst(t *testing.T) { testCheckConst(t) }
func TestConst(t *testing.T) { testConst(t) }
func TestCthread(t *testing.T) { testCthread(t) }
func TestEnum(t *testing.T) { testEnum(t) }
func TestNamedEnum(t *testing.T) { testNamedEnum(t) }
func TestCastToEnum(t *testing.T) { testCastToEnum(t) }
func TestErrno(t *testing.T) { testErrno(t) }
func TestFpVar(t *testing.T) { testFpVar(t) }
func TestHelpers(t *testing.T) { testHelpers(t) }

misc/cgo/test/issue1435.go (new file, 152 lines)

@@ -0,0 +1,152 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux,cgo

package cgotest

import (
	"fmt"
	"io/ioutil"
	"strings"
	"syscall"
	"testing"
)
// #include <stdio.h>
// #include <stdlib.h>
// #include <pthread.h>
// #include <unistd.h>
// #include <sys/types.h>
//
// pthread_t *t = NULL;
// pthread_mutex_t mu;
// int nts = 0;
// int all_done = 0;
//
// static void *aFn(void *vargp) {
// int done = 0;
// while (!done) {
// usleep(100);
// pthread_mutex_lock(&mu);
// done = all_done;
// pthread_mutex_unlock(&mu);
// }
// return NULL;
// }
//
// void trial(int argc) {
// int i;
// nts = argc;
// t = calloc(nts, sizeof(pthread_t));
// pthread_mutex_init(&mu, NULL);
// for (i = 0; i < nts; i++) {
// pthread_create(&t[i], NULL, aFn, NULL);
// }
// }
//
// void cleanup(void) {
// int i;
// pthread_mutex_lock(&mu);
// all_done = 1;
// pthread_mutex_unlock(&mu);
// for (i = 0; i < nts; i++) {
// pthread_join(t[i], NULL);
// }
// pthread_mutex_destroy(&mu);
// free(t);
// }
import "C"
// compareStatus is used to confirm the contents of the thread
// specific status files match expectations.
func compareStatus(filter, expect string) error {
expected := filter + "\t" + expect
pid := syscall.Getpid()
fs, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/task", pid))
if err != nil {
return fmt.Errorf("unable to find %d tasks: %v", pid, err)
}
for _, f := range fs {
tf := fmt.Sprintf("/proc/%s/status", f.Name())
d, err := ioutil.ReadFile(tf)
if err != nil {
return fmt.Errorf("unable to read %q: %v", tf, err)
}
lines := strings.Split(string(d), "\n")
for _, line := range lines {
if strings.HasPrefix(line, filter) {
if line != expected {
return fmt.Errorf("%s %s (bad)\n", tf, line)
}
break
}
}
}
return nil
}
// test1435 tests that the 9 glibc-implemented setuid/gid syscall functions are
// mapped. This test is a slightly more expansive test than that of
// src/syscall/syscall_linux_test.go:TestSetuidEtc() insofar as it
// launches concurrent threads from C code via CGo and validates that
// they are subject to the system calls being tested. For the actual
// Go functionality being tested here, the syscall_linux_test version
// is considered authoritative, but non-trivial improvements to that
// should be mirrored here.
func test1435(t *testing.T) {
if syscall.Getuid() != 0 {
t.Skip("skipping root only test")
}
// Launch some threads in C.
const cts = 5
C.trial(cts)
defer C.cleanup()
vs := []struct {
call string
fn func() error
filter, expect string
}{
{call: "Setegid(1)", fn: func() error { return syscall.Setegid(1) }, filter: "Gid:", expect: "0\t1\t0\t1"},
{call: "Setegid(0)", fn: func() error { return syscall.Setegid(0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Seteuid(1)", fn: func() error { return syscall.Seteuid(1) }, filter: "Uid:", expect: "0\t1\t0\t1"},
{call: "Setuid(0)", fn: func() error { return syscall.Setuid(0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
{call: "Setgid(1)", fn: func() error { return syscall.Setgid(1) }, filter: "Gid:", expect: "1\t1\t1\t1"},
{call: "Setgid(0)", fn: func() error { return syscall.Setgid(0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setgroups([]int{0,1,2,3})", fn: func() error { return syscall.Setgroups([]int{0, 1, 2, 3}) }, filter: "Groups:", expect: "0 1 2 3 "},
{call: "Setgroups(nil)", fn: func() error { return syscall.Setgroups(nil) }, filter: "Groups:", expect: " "},
{call: "Setgroups([]int{0})", fn: func() error { return syscall.Setgroups([]int{0}) }, filter: "Groups:", expect: "0 "},
{call: "Setregid(101,0)", fn: func() error { return syscall.Setregid(101, 0) }, filter: "Gid:", expect: "101\t0\t0\t0"},
{call: "Setregid(0,102)", fn: func() error { return syscall.Setregid(0, 102) }, filter: "Gid:", expect: "0\t102\t102\t102"},
{call: "Setregid(0,0)", fn: func() error { return syscall.Setregid(0, 0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setreuid(1,0)", fn: func() error { return syscall.Setreuid(1, 0) }, filter: "Uid:", expect: "1\t0\t0\t0"},
{call: "Setreuid(0,2)", fn: func() error { return syscall.Setreuid(0, 2) }, filter: "Uid:", expect: "0\t2\t2\t2"},
{call: "Setreuid(0,0)", fn: func() error { return syscall.Setreuid(0, 0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
{call: "Setresgid(101,0,102)", fn: func() error { return syscall.Setresgid(101, 0, 102) }, filter: "Gid:", expect: "101\t0\t102\t0"},
{call: "Setresgid(0,102,101)", fn: func() error { return syscall.Setresgid(0, 102, 101) }, filter: "Gid:", expect: "0\t102\t101\t102"},
{call: "Setresgid(0,0,0)", fn: func() error { return syscall.Setresgid(0, 0, 0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setresuid(1,0,2)", fn: func() error { return syscall.Setresuid(1, 0, 2) }, filter: "Uid:", expect: "1\t0\t2\t0"},
{call: "Setresuid(0,2,1)", fn: func() error { return syscall.Setresuid(0, 2, 1) }, filter: "Uid:", expect: "0\t2\t1\t2"},
{call: "Setresuid(0,0,0)", fn: func() error { return syscall.Setresuid(0, 0, 0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
}
for i, v := range vs {
if err := v.fn(); err != nil {
t.Errorf("[%d] %q failed: %v", i, v.call, err)
continue
}
if err := compareStatus(v.filter, v.expect); err != nil {
t.Errorf("[%d] %q comparison: %v", i, v.call, err)
}
}
}

View file

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build !windows,!static
// +build !darwin !internal_pie
// +build !darwin !internal_pie,!arm64
#include <stdint.h>
#include <dlfcn.h>

View file

@ -3,10 +3,11 @@
// license that can be found in the LICENSE file.
// +build !windows,!static
// +build !darwin !internal_pie
// +build !darwin !internal_pie,!arm64
// Excluded in darwin internal linking PIE mode, as dynamic export is not
// supported.
// Excluded in internal linking mode on darwin/arm64, as it is always PIE.
package cgotest

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows static darwin,internal_pie
// +build windows static darwin,internal_pie darwin,arm64
package cgotest

View file

@ -1000,6 +1000,32 @@ func testEnum(t *testing.T) {
}
}
func testNamedEnum(t *testing.T) {
e := new(C.enum_E)
*e = C.Enum1
if *e != 1 {
t.Error("bad enum", C.Enum1)
}
*e = C.Enum2
if *e != 2 {
t.Error("bad enum", C.Enum2)
}
}
func testCastToEnum(t *testing.T) {
e := C.enum_E(C.Enum1)
if e != 1 {
t.Error("bad enum", C.Enum1)
}
e = C.enum_E(C.Enum2)
if e != 2 {
t.Error("bad enum", C.Enum2)
}
}
func testAtol(t *testing.T) {
l := Atol("123")
if l != 123 {

View file

@ -0,0 +1,31 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build riscv64
// +build !gccgo
#include "textflag.h"
TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0
// Rewind stack pointer so anything that happens on the stack
// will clobber the test pattern created by the caller
ADD $(1024*8), X2
// Ask signaller to setgid
MOV $1, X5
FENCE
MOVW X5, ·Baton(SB)
FENCE
// Wait for setgid completion
loop:
FENCE
MOVW ·Baton(SB), X5
OR X6, X6, X6 // hint that we're in a spin loop
BNE ZERO, X5, loop
FENCE
// Restore stack
ADD $(-1024*8), X2
RET

View file

@ -7,6 +7,8 @@ package cshared_test
import (
"bytes"
"debug/elf"
"debug/pe"
"encoding/binary"
"flag"
"fmt"
"io/ioutil"
@ -355,6 +357,101 @@ func TestExportedSymbols(t *testing.T) {
}
}
func checkNumberOfExportedFunctionsWindows(t *testing.T, exportAllSymbols bool) {
const prog = `
package main
import "C"
//export GoFunc
func GoFunc() {
println(42)
}
//export GoFunc2
func GoFunc2() {
println(24)
}
func main() {
}
`
tmpdir := t.TempDir()
srcfile := filepath.Join(tmpdir, "test.go")
objfile := filepath.Join(tmpdir, "test.dll")
if err := ioutil.WriteFile(srcfile, []byte(prog), 0666); err != nil {
t.Fatal(err)
}
argv := []string{"build", "-buildmode=c-shared"}
if exportAllSymbols {
argv = append(argv, "-ldflags", "-extldflags=-Wl,--export-all-symbols")
}
argv = append(argv, "-o", objfile, srcfile)
out, err := exec.Command("go", argv...).CombinedOutput()
if err != nil {
t.Fatalf("build failure: %s\n%s\n", err, string(out))
}
f, err := pe.Open(objfile)
if err != nil {
t.Fatalf("pe.Open failed: %v", err)
}
defer f.Close()
section := f.Section(".edata")
if section == nil {
t.Fatalf(".edata section is not present")
}
// TODO: deduplicate this struct from cmd/link/internal/ld/pe.go
type IMAGE_EXPORT_DIRECTORY struct {
_ [2]uint32
_ [2]uint16
_ [2]uint32
NumberOfFunctions uint32
NumberOfNames uint32
_ [3]uint32
}
var e IMAGE_EXPORT_DIRECTORY
if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil {
t.Fatalf("binary.Read failed: %v", err)
}
// Only the two exported functions and _cgo_dummy_export should be exported
expectedNumber := uint32(3)
if exportAllSymbols {
if e.NumberOfFunctions <= expectedNumber {
t.Fatalf("missing exported functions: %v", e.NumberOfFunctions)
}
if e.NumberOfNames <= expectedNumber {
t.Fatalf("missing exported names: %v", e.NumberOfNames)
}
} else {
if e.NumberOfFunctions != expectedNumber {
t.Fatalf("got %d exported functions; want %d", e.NumberOfFunctions, expectedNumber)
}
if e.NumberOfNames != expectedNumber {
t.Fatalf("got %d exported names; want %d", e.NumberOfNames, expectedNumber)
}
}
}
func TestNumberOfExportedFunctions(t *testing.T) {
if GOOS != "windows" {
t.Skip("skipping windows only test")
}
t.Parallel()
t.Run("OnlyExported", func(t *testing.T) {
checkNumberOfExportedFunctionsWindows(t, false)
})
t.Run("All", func(t *testing.T) {
checkNumberOfExportedFunctionsWindows(t, true)
})
}
// test1: shared library can be dynamically loaded and exported symbols are accessible.
func TestExportedSymbolsWithDynamicLoad(t *testing.T) {
t.Parallel()

View file

@ -102,7 +102,7 @@
}
}
if (!global.crypto) {
if (!global.crypto && global.require) {
const nodeCrypto = require("crypto");
global.crypto = {
getRandomValues(b) {
@ -110,6 +110,9 @@
},
};
}
if (!global.crypto) {
throw new Error("global.crypto is not available, polyfill required (getRandomValues only)");
}
if (!global.performance) {
global.performance = {
@ -120,13 +123,19 @@
};
}
if (!global.TextEncoder) {
if (!global.TextEncoder && global.require) {
global.TextEncoder = require("util").TextEncoder;
}
if (!global.TextEncoder) {
throw new Error("global.TextEncoder is not available, polyfill required");
}
if (!global.TextDecoder) {
if (!global.TextDecoder && global.require) {
global.TextDecoder = require("util").TextDecoder;
}
if (!global.TextDecoder) {
throw new Error("global.TextDecoder is not available, polyfill required");
}
// End of polyfills for common API.
@ -255,6 +264,7 @@
// func wasmExit(code int32)
"runtime.wasmExit": (sp) => {
sp >>>= 0;
const code = this.mem.getInt32(sp + 8, true);
this.exited = true;
delete this._inst;
@ -267,6 +277,7 @@
// func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
"runtime.wasmWrite": (sp) => {
sp >>>= 0;
const fd = getInt64(sp + 8);
const p = getInt64(sp + 16);
const n = this.mem.getInt32(sp + 24, true);
@ -275,16 +286,19 @@
// func resetMemoryDataView()
"runtime.resetMemoryDataView": (sp) => {
sp >>>= 0;
this.mem = new DataView(this._inst.exports.mem.buffer);
},
// func nanotime1() int64
"runtime.nanotime1": (sp) => {
sp >>>= 0;
setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
},
// func walltime1() (sec int64, nsec int32)
"runtime.walltime1": (sp) => {
sp >>>= 0;
const msec = (new Date).getTime();
setInt64(sp + 8, msec / 1000);
this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true);
@ -292,6 +306,7 @@
// func scheduleTimeoutEvent(delay int64) int32
"runtime.scheduleTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this._nextCallbackTimeoutID;
this._nextCallbackTimeoutID++;
this._scheduledTimeouts.set(id, setTimeout(
@ -311,6 +326,7 @@
// func clearTimeoutEvent(id int32)
"runtime.clearTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this.mem.getInt32(sp + 8, true);
clearTimeout(this._scheduledTimeouts.get(id));
this._scheduledTimeouts.delete(id);
@ -318,11 +334,13 @@
// func getRandomData(r []byte)
"runtime.getRandomData": (sp) => {
sp >>>= 0;
crypto.getRandomValues(loadSlice(sp + 8));
},
// func finalizeRef(v ref)
"syscall/js.finalizeRef": (sp) => {
sp >>>= 0;
const id = this.mem.getUint32(sp + 8, true);
this._goRefCounts[id]--;
if (this._goRefCounts[id] === 0) {
@ -335,44 +353,51 @@
// func stringVal(value string) ref
"syscall/js.stringVal": (sp) => {
sp >>>= 0;
storeValue(sp + 24, loadString(sp + 8));
},
// func valueGet(v ref, p string) ref
"syscall/js.valueGet": (sp) => {
sp >>>= 0;
const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16));
sp = this._inst.exports.getsp(); // see comment above
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 32, result);
},
// func valueSet(v ref, p string, x ref)
"syscall/js.valueSet": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32));
},
// func valueDelete(v ref, p string)
"syscall/js.valueDelete": (sp) => {
sp >>>= 0;
Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16));
},
// func valueIndex(v ref, i int) ref
"syscall/js.valueIndex": (sp) => {
sp >>>= 0;
storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16)));
},
// valueSetIndex(v ref, i int, x ref)
"syscall/js.valueSetIndex": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24));
},
// func valueCall(v ref, m string, args []ref) (ref, bool)
"syscall/js.valueCall": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const m = Reflect.get(v, loadString(sp + 16));
const args = loadSliceOfValues(sp + 32);
const result = Reflect.apply(m, v, args);
sp = this._inst.exports.getsp(); // see comment above
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, result);
this.mem.setUint8(sp + 64, 1);
} catch (err) {
@ -383,11 +408,12 @@
// func valueInvoke(v ref, args []ref) (ref, bool)
"syscall/js.valueInvoke": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.apply(v, undefined, args);
sp = this._inst.exports.getsp(); // see comment above
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
@ -398,11 +424,12 @@
// func valueNew(v ref, args []ref) (ref, bool)
"syscall/js.valueNew": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.construct(v, args);
sp = this._inst.exports.getsp(); // see comment above
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
@ -413,11 +440,13 @@
// func valueLength(v ref) int
"syscall/js.valueLength": (sp) => {
sp >>>= 0;
setInt64(sp + 16, parseInt(loadValue(sp + 8).length));
},
// valuePrepareString(v ref) (ref, int)
"syscall/js.valuePrepareString": (sp) => {
sp >>>= 0;
const str = encoder.encode(String(loadValue(sp + 8)));
storeValue(sp + 16, str);
setInt64(sp + 24, str.length);
@ -425,17 +454,20 @@
// valueLoadString(v ref, b []byte)
"syscall/js.valueLoadString": (sp) => {
sp >>>= 0;
const str = loadValue(sp + 8);
loadSlice(sp + 16).set(str);
},
// func valueInstanceOf(v ref, t ref) bool
"syscall/js.valueInstanceOf": (sp) => {
sp >>>= 0;
this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 1 : 0);
},
// func copyBytesToGo(dst []byte, src ref) (int, bool)
"syscall/js.copyBytesToGo": (sp) => {
sp >>>= 0;
const dst = loadSlice(sp + 8);
const src = loadValue(sp + 32);
if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) {
@ -450,6 +482,7 @@
// func copyBytesToJS(dst ref, src []byte) (int, bool)
"syscall/js.copyBytesToJS": (sp) => {
sp >>>= 0;
const dst = loadValue(sp + 8);
const src = loadSlice(sp + 16);
if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) {
@ -470,6 +503,9 @@
}
async run(instance) {
if (!(instance instanceof WebAssembly.Instance)) {
throw new Error("Go.run: WebAssembly.Instance expected");
}
this._inst = instance;
this.mem = new DataView(this._inst.exports.mem.buffer);
this._values = [ // JS values that Go currently has references to, indexed by reference id

View file

@ -13,8 +13,8 @@ package tar
import (
"errors"
"fmt"
"io/fs"
"math"
"os"
"path"
"reflect"
"strconv"
@ -525,12 +525,12 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
return format, paxHdrs, err
}
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
// FileInfo returns an fs.FileInfo for the Header.
func (h *Header) FileInfo() fs.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
// headerFileInfo implements fs.FileInfo.
type headerFileInfo struct {
h *Header
}
@ -549,57 +549,57 @@ func (fi headerFileInfo) Name() string {
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
func (fi headerFileInfo) Mode() (mode fs.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
mode = fs.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
mode |= os.ModeSetuid
mode |= fs.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
mode |= os.ModeSetgid
mode |= fs.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
mode |= os.ModeSticky
mode |= fs.ModeSticky
}
// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
switch m := fs.FileMode(fi.h.Mode) &^ 07777; m {
case c_ISDIR:
mode |= os.ModeDir
mode |= fs.ModeDir
case c_ISFIFO:
mode |= os.ModeNamedPipe
mode |= fs.ModeNamedPipe
case c_ISLNK:
mode |= os.ModeSymlink
mode |= fs.ModeSymlink
case c_ISBLK:
mode |= os.ModeDevice
mode |= fs.ModeDevice
case c_ISCHR:
mode |= os.ModeDevice
mode |= os.ModeCharDevice
mode |= fs.ModeDevice
mode |= fs.ModeCharDevice
case c_ISSOCK:
mode |= os.ModeSocket
mode |= fs.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
mode |= os.ModeSymlink
mode |= fs.ModeSymlink
case TypeChar:
mode |= os.ModeDevice
mode |= os.ModeCharDevice
mode |= fs.ModeDevice
mode |= fs.ModeCharDevice
case TypeBlock:
mode |= os.ModeDevice
mode |= fs.ModeDevice
case TypeDir:
mode |= os.ModeDir
mode |= fs.ModeDir
case TypeFifo:
mode |= os.ModeNamedPipe
mode |= fs.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
var sysStat func(fi fs.FileInfo, h *Header) error
const (
// Mode constants from the USTAR spec:
@ -623,10 +623,10 @@ const (
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
//
// Since os.FileInfo's Name method only returns the base name of
// Since fs.FileInfo's Name method only returns the base name of
// the file it describes, it may be necessary to modify Header.Name
// to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("archive/tar: FileInfo is nil")
}
@ -643,29 +643,29 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
case fi.IsDir():
h.Typeflag = TypeDir
h.Name += "/"
case fm&os.ModeSymlink != 0:
case fm&fs.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
case fm&fs.ModeDevice != 0:
if fm&fs.ModeCharDevice != 0 {
h.Typeflag = TypeChar
} else {
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
case fm&fs.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
case fm&os.ModeSocket != 0:
case fm&fs.ModeSocket != 0:
return nil, fmt.Errorf("archive/tar: sockets not supported")
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
if fm&fs.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
if fm&fs.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
if fm&fs.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
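A brief usage sketch of the updated signature (editorial, not part of the change): it assumes a file named go.mod exists in the current directory, and relies on os.FileInfo now being an alias for fs.FileInfo, so values returned by os.Stat can be passed straight to FileInfoHeader.
package main

import (
    "archive/tar"
    "fmt"
    "log"
    "os"
)

func main() {
    fi, err := os.Stat("go.mod") // fi is an fs.FileInfo (os.FileInfo is an alias)
    if err != nil {
        log.Fatal(err)
    }
    hdr, err := tar.FileInfoHeader(fi, "")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(hdr.Name, hdr.Size, hdr.FileInfo().Mode())
}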

View file

@ -7,7 +7,6 @@ package tar
import (
"bytes"
"io"
"io/ioutil"
"strconv"
"strings"
"time"
@ -104,7 +103,7 @@ func (tr *Reader) next() (*Header, error) {
continue // This is a meta header affecting the next header
case TypeGNULongName, TypeGNULongLink:
format.mayOnlyBe(FormatGNU)
realname, err := ioutil.ReadAll(tr)
realname, err := io.ReadAll(tr)
if err != nil {
return nil, err
}
@ -294,7 +293,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@ -850,7 +849,7 @@ func discard(r io.Reader, n int64) error {
}
}
copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
copySkipped, err := io.CopyN(io.Discard, r, n-seekSkipped)
if err == io.EOF && seekSkipped+copySkipped < n {
err = io.ErrUnexpectedEOF
}

View file

@ -865,7 +865,7 @@ func TestReadTruncation(t *testing.T) {
}
cnt++
if s2 == "manual" {
if _, err = tr.writeTo(ioutil.Discard); err != nil {
if _, err = tr.writeTo(io.Discard); err != nil {
break
}
}

View file

@ -7,7 +7,7 @@
package tar
import (
"os"
"io/fs"
"os/user"
"runtime"
"strconv"
@ -23,7 +23,7 @@ func init() {
// The downside is that renaming uname or gname by the OS never takes effect.
var userMap, groupMap sync.Map // map[int]string
func statUnix(fi os.FileInfo, h *Header) error {
func statUnix(fi fs.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil

View file

@ -10,6 +10,7 @@ import (
"fmt"
"internal/testenv"
"io"
"io/fs"
"io/ioutil"
"math"
"os"
@ -327,7 +328,7 @@ func TestRoundTrip(t *testing.T) {
if !reflect.DeepEqual(rHdr, hdr) {
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
}
rData, err := ioutil.ReadAll(tr)
rData, err := io.ReadAll(tr)
if err != nil {
t.Fatalf("Read: %v", err)
}
@ -338,7 +339,7 @@ func TestRoundTrip(t *testing.T) {
type headerRoundTripTest struct {
h *Header
fm os.FileMode
fm fs.FileMode
}
func TestHeaderRoundTrip(t *testing.T) {
@ -361,7 +362,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360600852, 0),
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
fm: 0777 | fs.ModeSymlink,
}, {
// character device node.
h: &Header{
@ -371,7 +372,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360578951, 0),
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
fm: 0666 | fs.ModeDevice | fs.ModeCharDevice,
}, {
// block device node.
h: &Header{
@ -381,7 +382,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360578954, 0),
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
fm: 0660 | fs.ModeDevice,
}, {
// directory.
h: &Header{
@ -391,7 +392,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360601116, 0),
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
fm: 0755 | fs.ModeDir,
}, {
// fifo node.
h: &Header{
@ -401,7 +402,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360578949, 0),
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
fm: 0600 | fs.ModeNamedPipe,
}, {
// setuid.
h: &Header{
@ -411,7 +412,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1355405093, 0),
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
fm: 0755 | fs.ModeSetuid,
}, {
// setguid.
h: &Header{
@ -421,7 +422,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360602346, 0),
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
fm: 0750 | fs.ModeSetgid,
}, {
// sticky.
h: &Header{
@ -431,7 +432,7 @@ func TestHeaderRoundTrip(t *testing.T) {
ModTime: time.Unix(1360602540, 0),
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
fm: 0600 | fs.ModeSticky,
}, {
// hard link.
h: &Header{
@ -804,9 +805,9 @@ func Benchmark(b *testing.B) {
b.Run(v.label, func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
// Writing to ioutil.Discard because we want to
// Writing to io.Discard because we want to
// test purely the writer code and not bring disk performance into this.
tw := NewWriter(ioutil.Discard)
tw := NewWriter(io.Discard)
for _, file := range v.files {
if err := tw.WriteHeader(file.hdr); err != nil {
b.Errorf("unexpected WriteHeader error: %v", err)
@ -844,7 +845,7 @@ func Benchmark(b *testing.B) {
if _, err := tr.Next(); err != nil {
b.Errorf("unexpected Next error: %v", err)
}
if _, err := io.Copy(ioutil.Discard, tr); err != nil {
if _, err := io.Copy(io.Discard, tr); err != nil {
b.Errorf("unexpected Copy error : %v", err)
}
}

View file

@ -11,7 +11,12 @@ import (
"hash"
"hash/crc32"
"io"
"io/fs"
"os"
"path"
"sort"
"strings"
"sync"
"time"
)
@ -21,18 +26,28 @@ var (
ErrChecksum = errors.New("zip: checksum error")
)
// A Reader serves content from a ZIP archive.
type Reader struct {
r io.ReaderAt
File []*File
Comment string
decompressors map[uint16]Decompressor
// fileList is a list of files sorted by ename,
// for use by the Open method.
fileListOnce sync.Once
fileList []fileListEntry
}
// A ReadCloser is a Reader that must be closed when no longer needed.
type ReadCloser struct {
f *os.File
Reader
}
// A File is a single file in a ZIP archive.
// The file information is in the embedded FileHeader.
// The file content can be accessed by calling Open.
type File struct {
FileHeader
zip *Reader
@ -187,6 +202,10 @@ type checksumReader struct {
err error // sticky error
}
func (r *checksumReader) Stat() (fs.FileInfo, error) {
return headerFileInfo{&r.f.FileHeader}, nil
}
func (r *checksumReader) Read(b []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
@ -607,3 +626,173 @@ func (b *readBuf) sub(n int) readBuf {
*b = (*b)[n:]
return b2
}
// A fileListEntry is a File and its ename.
// If file == nil, the fileListEntry describes a directory, without metadata.
type fileListEntry struct {
name string
file *File // nil for directories
}
type fileInfoDirEntry interface {
fs.FileInfo
fs.DirEntry
}
func (e *fileListEntry) stat() fileInfoDirEntry {
if e.file != nil {
return headerFileInfo{&e.file.FileHeader}
}
return e
}
// Only used for directories.
func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem }
func (f *fileListEntry) Size() int64 { return 0 }
func (f *fileListEntry) ModTime() time.Time { return time.Time{} }
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
func (f *fileListEntry) IsDir() bool { return true }
func (f *fileListEntry) Sys() interface{} { return nil }
func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
// toValidName coerces name to be a valid name for fs.FS.Open.
func toValidName(name string) string {
name = strings.ReplaceAll(name, `\`, `/`)
p := path.Clean(name)
if strings.HasPrefix(p, "/") {
p = p[len("/"):]
}
for strings.HasPrefix(p, "../") {
p = p[len("../"):]
}
return p
}
func (r *Reader) initFileList() {
r.fileListOnce.Do(func() {
dirs := make(map[string]bool)
for _, file := range r.File {
name := toValidName(file.Name)
for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
dirs[dir] = true
}
r.fileList = append(r.fileList, fileListEntry{name, file})
}
for dir := range dirs {
r.fileList = append(r.fileList, fileListEntry{dir + "/", nil})
}
sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
})
}
func fileEntryLess(x, y string) bool {
xdir, xelem, _ := split(x)
ydir, yelem, _ := split(y)
return xdir < ydir || xdir == ydir && xelem < yelem
}
// Open opens the named file in the ZIP archive,
// using the semantics of fs.FS.Open:
// paths are always slash separated, with no
// leading / or ../ elements.
func (r *Reader) Open(name string) (fs.File, error) {
r.initFileList()
e := r.openLookup(name)
if e == nil || !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
if e.file == nil || strings.HasSuffix(e.file.Name, "/") {
return &openDir{e, r.openReadDir(name), 0}, nil
}
rc, err := e.file.Open()
if err != nil {
return nil, err
}
return rc.(fs.File), nil
}
func split(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
return name[:i], name[i+1:], isDir
}
var dotFile = &fileListEntry{name: "./"}
func (r *Reader) openLookup(name string) *fileListEntry {
if name == "." {
return dotFile
}
dir, elem, _ := split(name)
files := r.fileList
i := sort.Search(len(files), func(i int) bool {
idir, ielem, _ := split(files[i].name)
return idir > dir || idir == dir && ielem >= elem
})
if i < len(files) {
fname := files[i].name
if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
return &files[i]
}
}
return nil
}
func (r *Reader) openReadDir(dir string) []fileListEntry {
files := r.fileList
i := sort.Search(len(files), func(i int) bool {
idir, _, _ := split(files[i].name)
return idir >= dir
})
j := sort.Search(len(files), func(j int) bool {
jdir, _, _ := split(files[j].name)
return jdir > dir
})
return files[i:j]
}
type openDir struct {
e *fileListEntry
files []fileListEntry
offset int
}
func (d *openDir) Close() error { return nil }
func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat(), nil }
func (d *openDir) Read([]byte) (int, error) {
return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
}
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
n := len(d.files) - d.offset
if count > 0 && n > count {
n = count
}
if n == 0 {
if count <= 0 {
return nil, nil
}
return nil, io.EOF
}
list := make([]fs.DirEntry, n)
for i := range list {
list[i] = d.files[d.offset+i].stat()
}
d.offset += n
return list, nil
}
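A short usage sketch of the new Open method (editorial, not part of the change): it assumes an archive named example.zip exists and that the io/fs helpers such as fs.WalkDir from the same release are available.
package main

import (
    "archive/zip"
    "fmt"
    "io/fs"
    "log"
)

func main() {
    z, err := zip.OpenReader("example.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer z.Close()

    // *zip.Reader now has an Open method, so it satisfies fs.FS and can be
    // walked like any other file system.
    err = fs.WalkDir(z, ".", func(name string, d fs.DirEntry, err error) error {
        if err != nil {
            return err
        }
        fmt.Println(name, d.IsDir())
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}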

View file

@ -10,12 +10,14 @@ import (
"encoding/hex"
"internal/obscuretestdata"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"testing/fstest"
"time"
)
@ -30,7 +32,7 @@ type ZipTest struct {
type ZipTestFile struct {
Name string
Mode os.FileMode
Mode fs.FileMode
NonUTF8 bool
ModTime time.Time
Modified time.Time
@ -107,7 +109,7 @@ var tests = []ZipTest{
Name: "symlink",
Content: []byte("../target"),
Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
Mode: 0777 | os.ModeSymlink,
Mode: 0777 | fs.ModeSymlink,
},
},
},
@ -149,7 +151,7 @@ var tests = []ZipTest{
Name: "dir/empty/",
Content: []byte{},
Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
Mode: os.ModeDir | 0777,
Mode: fs.ModeDir | 0777,
},
{
Name: "readonly",
@ -179,7 +181,7 @@ var tests = []ZipTest{
Name: "dir/empty/",
Content: []byte{},
Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
Mode: os.ModeDir | 0777,
Mode: fs.ModeDir | 0777,
},
{
Name: "readonly",
@ -645,7 +647,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
}
}
func testFileMode(t *testing.T, f *File, want os.FileMode) {
func testFileMode(t *testing.T, f *File, want fs.FileMode) {
mode := f.Mode()
if want == 0 {
t.Errorf("%s mode: got %v, want none", f.Name, mode)
@ -928,7 +930,7 @@ func returnBigZipBytes() (r io.ReaderAt, size int64) {
if err != nil {
panic(err)
}
b, err = ioutil.ReadAll(f)
b, err = io.ReadAll(f)
if err != nil {
panic(err)
}
@ -985,7 +987,7 @@ func TestIssue10957(t *testing.T) {
continue
}
if f.UncompressedSize64 < 1e6 {
n, err := io.Copy(ioutil.Discard, r)
n, err := io.Copy(io.Discard, r)
if i == 3 && err != io.ErrUnexpectedEOF {
t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
}
@ -1027,7 +1029,7 @@ func TestIssue11146(t *testing.T) {
if err != nil {
t.Fatal(err)
}
_, err = ioutil.ReadAll(r)
_, err = io.ReadAll(r)
if err != io.ErrUnexpectedEOF {
t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
}
@ -1070,3 +1072,13 @@ func TestIssue12449(t *testing.T) {
t.Errorf("Error reading the archive: %v", err)
}
}
func TestFS(t *testing.T) {
z, err := OpenReader("testdata/unix.zip")
if err != nil {
t.Fatal(err)
}
if err := fstest.TestFS(z, "hello", "dir/bar", "dir/empty", "readonly"); err != nil {
t.Fatal(err)
}
}

View file

@ -8,7 +8,6 @@ import (
"compress/flate"
"errors"
"io"
"io/ioutil"
"sync"
)
@ -111,7 +110,7 @@ func init() {
compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))
decompressors.Store(Store, Decompressor(ioutil.NopCloser))
decompressors.Store(Store, Decompressor(io.NopCloser))
decompressors.Store(Deflate, Decompressor(newFlateReader))
}

View file

@ -20,7 +20,7 @@ fields must be used instead.
package zip
import (
"os"
"io/fs"
"path"
"time"
)
@ -137,12 +137,12 @@ type FileHeader struct {
ExternalAttrs uint32 // Meaning depends on CreatorVersion
}
// FileInfo returns an os.FileInfo for the FileHeader.
func (h *FileHeader) FileInfo() os.FileInfo {
// FileInfo returns an fs.FileInfo for the FileHeader.
func (h *FileHeader) FileInfo() fs.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
// headerFileInfo implements fs.FileInfo.
type headerFileInfo struct {
fh *FileHeader
}
@ -161,17 +161,20 @@ func (fi headerFileInfo) ModTime() time.Time {
}
return fi.fh.Modified.UTC()
}
func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() }
func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
func (fi headerFileInfo) Sys() interface{} { return fi.fh }
func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
// FileInfoHeader creates a partially-populated FileHeader from an
// os.FileInfo.
// Because os.FileInfo's Name method returns only the base name of
// fs.FileInfo.
// Because fs.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
// If compression is desired, callers should set the FileHeader.Method
// field; it is unset by default.
func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) {
func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
size := fi.Size()
fh := &FileHeader{
Name: fi.Name(),
@ -280,7 +283,7 @@ const (
)
// Mode returns the permission and mode bits for the FileHeader.
func (h *FileHeader) Mode() (mode os.FileMode) {
func (h *FileHeader) Mode() (mode fs.FileMode) {
switch h.CreatorVersion >> 8 {
case creatorUnix, creatorMacOSX:
mode = unixModeToFileMode(h.ExternalAttrs >> 16)
@ -288,18 +291,18 @@ func (h *FileHeader) Mode() (mode os.FileMode) {
mode = msdosModeToFileMode(h.ExternalAttrs)
}
if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
mode |= os.ModeDir
mode |= fs.ModeDir
}
return mode
}
// SetMode changes the permission and mode bits for the FileHeader.
func (h *FileHeader) SetMode(mode os.FileMode) {
func (h *FileHeader) SetMode(mode fs.FileMode) {
h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
h.ExternalAttrs = fileModeToUnixMode(mode) << 16
// set MSDOS attributes too, as the original zip does.
if mode&os.ModeDir != 0 {
if mode&fs.ModeDir != 0 {
h.ExternalAttrs |= msdosDir
}
if mode&0200 == 0 {
@ -312,9 +315,9 @@ func (h *FileHeader) isZip64() bool {
return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
}
func msdosModeToFileMode(m uint32) (mode os.FileMode) {
func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
if m&msdosDir != 0 {
mode = os.ModeDir | 0777
mode = fs.ModeDir | 0777
} else {
mode = 0666
}
@ -324,64 +327,64 @@ func msdosModeToFileMode(m uint32) (mode os.FileMode) {
return mode
}
func fileModeToUnixMode(mode os.FileMode) uint32 {
func fileModeToUnixMode(mode fs.FileMode) uint32 {
var m uint32
switch mode & os.ModeType {
switch mode & fs.ModeType {
default:
m = s_IFREG
case os.ModeDir:
case fs.ModeDir:
m = s_IFDIR
case os.ModeSymlink:
case fs.ModeSymlink:
m = s_IFLNK
case os.ModeNamedPipe:
case fs.ModeNamedPipe:
m = s_IFIFO
case os.ModeSocket:
case fs.ModeSocket:
m = s_IFSOCK
case os.ModeDevice:
if mode&os.ModeCharDevice != 0 {
case fs.ModeDevice:
if mode&fs.ModeCharDevice != 0 {
m = s_IFCHR
} else {
m = s_IFBLK
}
}
if mode&os.ModeSetuid != 0 {
if mode&fs.ModeSetuid != 0 {
m |= s_ISUID
}
if mode&os.ModeSetgid != 0 {
if mode&fs.ModeSetgid != 0 {
m |= s_ISGID
}
if mode&os.ModeSticky != 0 {
if mode&fs.ModeSticky != 0 {
m |= s_ISVTX
}
return m | uint32(mode&0777)
}
func unixModeToFileMode(m uint32) os.FileMode {
mode := os.FileMode(m & 0777)
func unixModeToFileMode(m uint32) fs.FileMode {
mode := fs.FileMode(m & 0777)
switch m & s_IFMT {
case s_IFBLK:
mode |= os.ModeDevice
mode |= fs.ModeDevice
case s_IFCHR:
mode |= os.ModeDevice | os.ModeCharDevice
mode |= fs.ModeDevice | fs.ModeCharDevice
case s_IFDIR:
mode |= os.ModeDir
mode |= fs.ModeDir
case s_IFIFO:
mode |= os.ModeNamedPipe
mode |= fs.ModeNamedPipe
case s_IFLNK:
mode |= os.ModeSymlink
mode |= fs.ModeSymlink
case s_IFREG:
// nothing to do
case s_IFSOCK:
mode |= os.ModeSocket
mode |= fs.ModeSocket
}
if m&s_ISGID != 0 {
mode |= os.ModeSetgid
mode |= fs.ModeSetgid
}
if m&s_ISUID != 0 {
mode |= os.ModeSetuid
mode |= fs.ModeSetuid
}
if m&s_ISVTX != 0 {
mode |= os.ModeSticky
mode |= fs.ModeSticky
}
return mode
}
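A small sketch (editorial, not part of the change) of the fs.FileMode-based FileHeader API; the name and mode below are arbitrary examples.
package main

import (
    "archive/zip"
    "fmt"
    "io/fs"
)

func main() {
    h := &zip.FileHeader{Name: "link-to-target"}
    h.SetMode(0755 | fs.ModeSymlink) // SetMode now takes an fs.FileMode
    fmt.Println(h.Mode())            // Mode returns an fs.FileMode: Lrwxr-xr-x
}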

View file

@ -9,9 +9,9 @@ import (
"encoding/binary"
"fmt"
"io"
"io/fs"
"io/ioutil"
"math/rand"
"os"
"strings"
"testing"
"time"
@ -23,7 +23,7 @@ type WriteTest struct {
Name string
Data []byte
Method uint16
Mode os.FileMode
Mode fs.FileMode
}
var writeTests = []WriteTest{
@ -43,19 +43,19 @@ var writeTests = []WriteTest{
Name: "setuid",
Data: []byte("setuid file"),
Method: Deflate,
Mode: 0755 | os.ModeSetuid,
Mode: 0755 | fs.ModeSetuid,
},
{
Name: "setgid",
Data: []byte("setgid file"),
Method: Deflate,
Mode: 0755 | os.ModeSetgid,
Mode: 0755 | fs.ModeSetgid,
},
{
Name: "symlink",
Data: []byte("../link/target"),
Method: Deflate,
Mode: 0755 | os.ModeSymlink,
Mode: 0755 | fs.ModeSymlink,
},
}
@ -301,7 +301,7 @@ func TestWriterFlush(t *testing.T) {
}
func TestWriterDir(t *testing.T) {
w := NewWriter(ioutil.Discard)
w := NewWriter(io.Discard)
dw, err := w.Create("dir/")
if err != nil {
t.Fatal(err)
@ -380,7 +380,7 @@ func testReadFile(t *testing.T, f *File, wt *WriteTest) {
if err != nil {
t.Fatal("opening:", err)
}
b, err := ioutil.ReadAll(rc)
b, err := io.ReadAll(rc)
if err != nil {
t.Fatal("reading:", err)
}

View file

@ -13,7 +13,6 @@ import (
"hash"
"internal/testenv"
"io"
"io/ioutil"
"runtime"
"sort"
"strings"
@ -620,7 +619,7 @@ func testZip64(t testing.TB, size int64) *rleBuffer {
t.Fatal("read:", err)
}
}
gotEnd, err := ioutil.ReadAll(rc)
gotEnd, err := io.ReadAll(rc)
if err != nil {
t.Fatal("read end:", err)
}

View file

@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"testing/iotest"
@ -886,7 +885,7 @@ func TestReadEmptyBuffer(t *testing.T) {
func TestLinesAfterRead(t *testing.T) {
l := NewReaderSize(bytes.NewReader([]byte("foo")), minReadBufferSize)
_, err := ioutil.ReadAll(l)
_, err := io.ReadAll(l)
if err != nil {
t.Error(err)
return
@ -1130,7 +1129,7 @@ func TestWriterReadFromCounts(t *testing.T) {
}
}
// A writeCountingDiscard is like ioutil.Discard and counts the number of times
// A writeCountingDiscard is like io.Discard and counts the number of times
// Write is called on it.
type writeCountingDiscard int
@ -1300,7 +1299,7 @@ func TestReaderReset(t *testing.T) {
t.Errorf("buf = %q; want foo", buf)
}
r.Reset(strings.NewReader("bar bar"))
all, err := ioutil.ReadAll(r)
all, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
@ -1645,13 +1644,13 @@ func BenchmarkReaderWriteToOptimal(b *testing.B) {
buf := make([]byte, bufSize)
r := bytes.NewReader(buf)
srcReader := NewReaderSize(onlyReader{r}, 1<<10)
if _, ok := ioutil.Discard.(io.ReaderFrom); !ok {
b.Fatal("ioutil.Discard doesn't support ReaderFrom")
if _, ok := io.Discard.(io.ReaderFrom); !ok {
b.Fatal("io.Discard doesn't support ReaderFrom")
}
for i := 0; i < b.N; i++ {
r.Seek(0, io.SeekStart)
srcReader.Reset(onlyReader{r})
n, err := srcReader.WriteTo(ioutil.Discard)
n, err := srcReader.WriteTo(io.Discard)
if err != nil {
b.Fatal(err)
}
@ -1722,7 +1721,7 @@ func BenchmarkReaderEmpty(b *testing.B) {
str := strings.Repeat("x", 16<<10)
for i := 0; i < b.N; i++ {
br := NewReader(strings.NewReader(str))
n, err := io.Copy(ioutil.Discard, br)
n, err := io.Copy(io.Discard, br)
if err != nil {
b.Fatal(err)
}
@ -1737,7 +1736,7 @@ func BenchmarkWriterEmpty(b *testing.B) {
str := strings.Repeat("x", 1<<10)
bs := []byte(str)
for i := 0; i < b.N; i++ {
bw := NewWriter(ioutil.Discard)
bw := NewWriter(io.Discard)
bw.Flush()
bw.WriteByte('a')
bw.Flush()
@ -1752,7 +1751,7 @@ func BenchmarkWriterEmpty(b *testing.B) {
func BenchmarkWriterFlush(b *testing.B) {
b.ReportAllocs()
bw := NewWriter(ioutil.Discard)
bw := NewWriter(io.Discard)
str := strings.Repeat("x", 50)
for i := 0; i < b.N; i++ {
bw.WriteString(str)

View file

@ -8,7 +8,6 @@ import (
. "bytes"
"fmt"
"io"
"io/ioutil"
"sync"
"testing"
)
@ -235,7 +234,7 @@ func TestReaderCopyNothing(t *testing.T) {
type justWriter struct {
io.Writer
}
discard := justWriter{ioutil.Discard} // hide ReadFrom
discard := justWriter{io.Discard} // hide ReadFrom
var with, withOut nErr
with.n, with.err = io.Copy(discard, NewReader(nil))
@ -248,7 +247,7 @@ func TestReaderCopyNothing(t *testing.T) {
// tests that Len is affected by reads, but Size is not.
func TestReaderLenSize(t *testing.T) {
r := NewReader([]byte("abc"))
io.CopyN(ioutil.Discard, r, 1)
io.CopyN(io.Discard, r, 1)
if r.Len() != 2 {
t.Errorf("Len = %d; want 2", r.Len())
}
@ -268,7 +267,7 @@ func TestReaderReset(t *testing.T) {
if err := r.UnreadRune(); err == nil {
t.Errorf("UnreadRune: expected error, got nil")
}
buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
if err != nil {
t.Errorf("ReadAll: unexpected error: %v", err)
}
@ -314,7 +313,7 @@ func TestReaderZero(t *testing.T) {
t.Errorf("UnreadRune: got nil, want error")
}
if n, err := (&Reader{}).WriteTo(ioutil.Discard); n != 0 || err != nil {
if n, err := (&Reader{}).WriteTo(io.Discard); n != 0 || err != nil {
t.Errorf("WriteTo: got %d, %v; want 0, nil", n, err)
}
}

View file

@ -326,6 +326,18 @@ func compareAPI(w io.Writer, features, required, optional, exception []string, a
return
}
// aliasReplacer applies type aliases to earlier API files,
// to avoid misleading negative results.
// This makes all the references to os.FileInfo in go1.txt
// be read as if they said fs.FileInfo, since os.FileInfo is now an alias.
// If there are many of these, we could do a more general solution,
// but for now the replacer is fine.
var aliasReplacer = strings.NewReplacer(
"os.FileInfo", "fs.FileInfo",
"os.FileMode", "fs.FileMode",
"os.PathError", "fs.PathError",
)
func fileFeatures(filename string) []string {
if filename == "" {
return nil
@ -334,7 +346,9 @@ func fileFeatures(filename string) []string {
if err != nil {
log.Fatalf("Error reading file %s: %v", filename, err)
}
lines := strings.Split(string(bs), "\n")
s := string(bs)
s = aliasReplacer.Replace(s)
lines := strings.Split(s, "\n")
var nonblank []string
for _, line := range lines {
line = strings.TrimSpace(line)
@ -856,6 +870,10 @@ func (w *Walker) emitObj(obj types.Object) {
func (w *Walker) emitType(obj *types.TypeName) {
name := obj.Name()
typ := obj.Type()
if obj.IsAlias() {
w.emitf("type %s = %s", name, w.typeString(typ))
return
}
switch typ := typ.Underlying().(type) {
case *types.Struct:
w.emitStructType(name, typ)
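An editorial sketch of what the alias replacer does to a single api-file line before comparison (the sample line below is written in the api-file format purely for illustration):
package main

import (
    "fmt"
    "strings"
)

func main() {
    aliasReplacer := strings.NewReplacer(
        "os.FileInfo", "fs.FileInfo",
        "os.FileMode", "fs.FileMode",
        "os.PathError", "fs.PathError",
    )
    line := "pkg archive/tar, func FileInfoHeader(os.FileInfo, string) (*Header, error)"
    fmt.Println(aliasReplacer.Replace(line))
    // Prints: pkg archive/tar, func FileInfoHeader(fs.FileInfo, string) (*Header, error)
}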

View file

@ -535,6 +535,9 @@ func archRISCV64() *Arch {
// Standard register names.
for i := riscv.REG_X0; i <= riscv.REG_X31; i++ {
if i == riscv.REG_G {
continue
}
name := fmt.Sprintf("X%d", i-riscv.REG_X0)
register[name] = int16(i)
}
@ -571,7 +574,7 @@ func archRISCV64() *Arch {
register["S8"] = riscv.REG_S8
register["S9"] = riscv.REG_S9
register["S10"] = riscv.REG_S10
register["S11"] = riscv.REG_S11
// Skip S11 as it is the g register.
register["T3"] = riscv.REG_T3
register["T4"] = riscv.REG_T4
register["T5"] = riscv.REG_T5

View file

@ -75,7 +75,7 @@ func IsARM64STLXR(op obj.As) bool {
arm64.ASTXP, arm64.ASTXPW, arm64.ASTLXP, arm64.ASTLXPW:
return true
}
// atomic instructions
// LDADDx/SWPx/CASx atomic instructions
if arm64.IsAtomicInstruction(op) {
return true
}
@ -93,6 +93,17 @@ func IsARM64TBL(op obj.As) bool {
return false
}
// IsARM64CASP reports whether the op (as defined by an arm64.A*
// constant) is one of the CASP-like instructions, and its 2nd
// destination is a register pair that requires special handling.
func IsARM64CASP(op obj.As) bool {
switch op {
case arm64.ACASPD, arm64.ACASPW:
return true
}
return false
}
// ARM64Suffix handles the special suffix for the ARM64.
// It returns a boolean to indicate success; failure means
// cond was unrecognized.

View file

@ -637,6 +637,18 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.From = a[0]
prog.SetFrom3(a[1])
prog.To = a[2]
case arch.IsARM64CASP(op):
prog.From = a[0]
prog.To = a[1]
// Both the 1st and 3rd operands are (Rs, Rs+1) register pairs,
// and each pair must be contiguous.
if (a[0].Type != obj.TYPE_REGREG) || (a[2].Type != obj.TYPE_REGREG) {
p.errorf("invalid addressing modes for 1st or 3rd operand to %s instruction, must be register pair", op)
return
}
// For ARM64 CASP-like instructions, the 2nd destination operand is a register pair (Rt, Rt+1) that
// cannot fit into prog.RegTo2, so save it in prog.RestArgs.
prog.SetTo2(a[2])
default:
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
@ -725,7 +737,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
}
if p.arch.Family == sys.AMD64 {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2]}
prog.SetRestArgs([]obj.Addr{a[1], a[2]})
prog.To = a[3]
break
}
@ -808,13 +820,13 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
}
if p.arch.Family == sys.AMD64 {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2], a[3]}
prog.SetRestArgs([]obj.Addr{a[1], a[2], a[3]})
prog.To = a[4]
break
}
if p.arch.Family == sys.S390X {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2], a[3]}
prog.SetRestArgs([]obj.Addr{a[1], a[2], a[3]})
prog.To = a[4]
break
}

View file

@ -390,12 +390,7 @@ func TestARM64Errors(t *testing.T) {
}
func TestAMD64EndToEnd(t *testing.T) {
defer func(old string) { objabi.GOAMD64 = old }(objabi.GOAMD64)
for _, goamd64 := range []string{"normaljumps", "alignedjumps"} {
t.Logf("GOAMD64=%s", goamd64)
objabi.GOAMD64 = goamd64
testEndToEnd(t, "amd64", "amd64")
}
}
func Test386Encoder(t *testing.T) {

View file

@ -10,14 +10,8 @@
TEXT foo(SB), DUPOK|NOSPLIT, $-8
//
// ADD
//
// LTYPE1 imsr ',' spreg ',' reg
// {
// outcode($1, &$2, $4, &$6);
// }
// imsr comes from the old 7a; we only support immediates and registers
// arithmetic operations
ADDW $1, R2, R3
ADDW R1, R2, R3
ADDW R1, ZR, R3
@ -25,6 +19,13 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
ADD R1, R2, R3
ADD R1, ZR, R3
ADD $1, R2, R3
ADDW $1, R2
ADDW R1, R2
ADD $1, R2
ADD R1, R2
ADD R1>>11, R2
ADD R1<<22, R2
ADD R1->33, R2
ADD $0x000aaa, R2, R3 // ADD $2730, R2, R3 // 43a82a91
ADD $0x000aaa, R2 // ADD $2730, R2 // 42a82a91
ADD $0xaaa000, R2, R3 // ADD $11182080, R2, R3 // 43a86a91
@ -37,6 +38,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
SUB $0xaaa000, R2 // SUB $11182080, R2 // 42a86ad1
SUB $0xaaaaaa, R2, R3 // SUB $11184810, R2, R3 // 43a82ad163a86ad1
SUB $0xaaaaaa, R2 // SUB $11184810, R2 // 42a82ad142a86ad1
ADDW $0x60060, R2 // ADDW $393312, R2 // 4280011142804111
ADD $0x186a0, R2, R5 // ADD $100000, R2, R5 // 45801a91a5604091
SUB $0xe7791f700, R3, R1 // SUB $62135596800, R3, R1 // 1be09ed23bf2aef2db01c0f261001bcb
ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b
ADD R1>>11, R2, R3
ADD R1<<22, R2, R3
ADD R1->33, R2, R3
@ -59,6 +64,30 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
CMN R1.SXTX<<2, R10 // 5fe921ab
CMPW R2.UXTH<<3, R11 // 7f2d226b
CMNW R1.SXTB, R9 // 3f81212b
CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b
CMPW $40960, R0 // 1f284071
CMPW $27745, R2 // 3b8c8d525f001b6b
CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b
CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b
CMP $0xffffffffffa0, R3 // CMP $281474976710560, R3 // fb0b80921b00e0f27f001beb
CMP $0xf4240, R1 // CMP $1000000, R1 // 1b4888d2fb01a0f23f001beb
CMP $3343198598084851058, R3 // 5bae8ed2db8daef23badcdf2bbcce5f27f001beb
CMP $3, R2
CMP R1, R2
CMP R1->11, R2
CMP R1>>22, R2
CMP R1<<33, R2
CMP R22.SXTX, RSP // ffe336eb
CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
CCMN MI, ZR, R1, $4 // e44341ba
// MADD Rn,Rm,Ra,Rd
MADD R1, R2, R3, R4 // 6408019b
// CLS
CLSW R1, R2
CLS R1, R2
// fp/simd instructions.
VADDP V1.B16, V2.B16, V3.B16 // 43bc214e
VADDP V1.S4, V2.S4, V3.S4 // 43bca14e
VADDP V1.D2, V2.D2, V3.D2 // 43bce14e
@ -67,22 +96,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VORR V5.B16, V4.B16, V3.B16 // 831ca54e
VADD V16.S4, V5.S4, V9.S4 // a984b04e
VEOR V0.B16, V1.B16, V0.B16 // 201c206e
SHA256H V9.S4, V3, V2 // 6240095e
SHA256H2 V9.S4, V4, V3 // 8350095e
SHA256SU0 V8.S4, V7.S4 // 0729285e
SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e
SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e
SHA1SU1 V5.S4, V1.S4 // a118285e
SHA1C V1.S4, V2, V3 // 4300015e
SHA1H V5, V4 // a408285e
SHA1M V8.S4, V7, V6 // e620085e
SHA1P V11.S4, V10, V9 // 49110b5e
SHA512H V2.D2, V1, V0 // 208062ce
SHA512H2 V4.D2, V3, V2 // 628464ce
SHA512SU0 V9.D2, V8.D2 // 2881c0ce
SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce
VRAX1 V26.D2, V29.D2, V30.D2 // be8f7ace
VXAR $63, V27.D2, V21.D2, V26.D2 // bafe9bce
VADDV V0.S4, V0 // 00b8b14e
VMOVI $82, V0.B16 // 40e6024f
VUADDLV V6.B16, V6 // c638306e
@ -96,10 +109,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VFMLS V1.D2, V12.D2, V1.D2 // 81cde14e
VFMLS V1.S2, V12.S2, V1.S2 // 81cda10e
VFMLS V1.S4, V12.S4, V1.S4 // 81cda14e
VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e
VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e
VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e
VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e
VEXT $4, V2.B8, V1.B8, V3.B8 // 2320022e
VEXT $8, V2.B16, V1.B16, V3.B16 // 2340026e
VRBIT V24.B16, V24.B16 // 185b606e
@ -125,6 +134,14 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VSRI $8, V1.H8, V2.H8 // 2244186f
VSRI $2, V1.B8, V2.B8 // 22440e2f
VSRI $2, V1.B16, V2.B16 // 22440e6f
VSLI $7, V2.B16, V3.B16 // 43540f6f
VSLI $15, V3.H4, V4.H4 // 64541f2f
VSLI $31, V5.S4, V6.S4 // a6543f6f
VSLI $63, V7.D2, V8.D2 // e8547f6f
VUSRA $8, V2.B16, V3.B16 // 4314086f
VUSRA $16, V3.H4, V4.H4 // 6414102f
VUSRA $32, V5.S4, V6.S4 // a614206f
VUSRA $64, V7.D2, V8.D2 // e814406f
VTBL V22.B16, [V28.B16, V29.B16], V11.B16 // 8b23164e
VTBL V18.B8, [V17.B16, V18.B16, V19.B16], V22.B8 // 3642120e
VTBL V31.B8, [V14.B16, V15.B16, V16.B16, V17.B16], V15.B8 // cf611f0e
@ -141,8 +158,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VTBL V14.B16, [V3.B16, V4.B16, V5.B16], V17.B16 // 71400e4e
VTBL V13.B16, [V29.B16, V30.B16, V31.B16, V0.B16], V28.B16 // bc630d4e
VTBL V3.B8, [V27.B16], V8.B8 // 6803030e
VEOR3 V2.B16, V7.B16, V12.B16, V25.B16 // 990907ce
VBCAX V1.B16, V2.B16, V26.B16, V31.B16 // 5f0722ce
VZIP1 V16.H8, V3.H8, V19.H8 // 7338504e
VZIP2 V22.D2, V25.D2, V21.D2 // 357bd64e
VZIP1 V6.D2, V9.D2, V11.D2 // 2b39c64e
@ -180,105 +195,87 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f
VBIF V0.B8, V30.B8, V1.B8 // c11fe02e
VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
MOVWU (R19)(R20<<2), R20 // 747a74b8
MOVD (R2)(R6<<3), R4 // 447866f8
MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8
MOVWU (R5)(R4.UXTW), R10 // aa4864b8
MOVBU (R3)(R9.UXTW), R8 // 68486938
MOVBU (R5)(R8), R10 // MOVBU (R5)(R8*1), R10 // aa686838
MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778
MOVHU (R1)(R2<<1), R5 // 25786278
MOVB (R9)(R3.UXTW), R6 // 2649a338
MOVB (R10)(R6), R15 // MOVB (R10)(R6*1), R15 // 4f69a638
MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778
MOVH (R8)(R4<<1), R10 // 0a79a478
MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8
MOVW (R1)(R4.SXTX), R11 // 2be8a4b8
MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8
MOVW (R2)(R5), R12 // MOVW (R2)(R5*1), R12 // 4c68a5b8
MOVD R5, (R2)(R6<<3) // 457826f8
MOVD R9, (R6)(R7.SXTX<<3) // c9f827f8
MOVD ZR, (R6)(R7.SXTX<<3) // dff827f8
MOVW R8, (R2)(R3.UXTW<<2) // 485823b8
MOVW R7, (R3)(R4.SXTW) // 67c824b8
MOVB R4, (R2)(R6.SXTX) // 44e82638
MOVB R8, (R3)(R9.UXTW) // 68482938
MOVB R10, (R5)(R8) // MOVB R10, (R5)(R8*1) // aa682838
MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778
MOVH R5, (R1)(R2<<1) // 25782278
MOVH R7, (R2)(R5.SXTX<<1) // 47f82578
MOVH R8, (R3)(R6.UXTW) // 68482678
MOVB (R29)(R30<<0), R14 // ae7bbe38
MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38
MOVB R4, (R2)(R6.SXTX) // 44e82638
FMOVS $(4.0), F0 // 0010221e
FMOVD $(4.0), F0 // 0010621e
FMOVS $(0.265625), F1 // 01302a1e
FMOVD $(0.1796875), F2 // 02f0681e
FMOVS $(0.96875), F3 // 03f02d1e
FMOVD $(28.0), F4 // 0490671e
VUADDW V9.B8, V12.H8, V14.H8 // 8e11292e
VUADDW V13.H4, V10.S4, V11.S4 // 4b116d2e
VUADDW V21.S2, V24.D2, V29.D2 // 1d13b52e
VUADDW2 V9.B16, V12.H8, V14.H8 // 8e11296e
VUADDW2 V13.H8, V20.S4, V30.S4 // 9e126d6e
VUADDW2 V21.S4, V24.D2, V29.D2 // 1d13b56e
FCCMPS LT, F1, F2, $1 // 41b4211e
FMADDS F1, F3, F2, F4 // 440c011f
FMADDD F4, F5, F4, F4 // 8414441f
FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f
FNMADDS F1, F3, F2, F4 // 440c211f
FNMADDD F1, F3, F2, F4 // 440c611f
FNMSUBS F1, F3, F2, F4 // 448c211f
FNMSUBD F1, F3, F2, F4 // 448c611f
FADDS F2, F3, F4 // 6428221e
FADDD F1, F2 // 4228611e
VDUP V19.S[0], V17.S4 // 7106044e
// move a large constant to a Vd.
VMOVS $0x80402010, V11 // VMOVS $2151686160, V11
VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20
VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10
VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
FMOVD (R2)(R6<<3), F4 // 447866fc
FMOVS F4, (R2)(R6) // FMOVS F4, (R2)(R6*1) // 446826bc
FMOVS F4, (R2)(R6<<2) // 447826bc
FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc
FMOVD F4, (R2)(R6<<3) // 447826fc
// special
PRFM (R2), PLDL1KEEP // 400080f9
PRFM 16(R2), PLDL1KEEP // 400880f9
PRFM 48(R6), PSTL2STRM // d31880f9
PRFM 8(R12), PLIL3STRM // 8d0580f9
PRFM (R8), $25 // 190180f9
PRFM 8(R9), $30 // 3e0580f9
NOOP // 1f2003d5
HINT $0 // 1f2003d5
DMB $1
SVC
CMPW $40960, R0 // 1f284071
CMPW $27745, R2 // 3b8c8d525f001b6b
CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b
CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b
CMP $0xffffffffffa0, R3 // CMP $281474976710560, R3 // fb0b80921b00e0f27f001beb
CMP $0xf4240, R1 // CMP $1000000, R1 // 1b4888d2fb01a0f23f001beb
ADD $0x186a0, R2, R5 // ADD $100000, R2, R5 // 45801a91a5604091
SUB $0xe7791f700, R3, R1 // SUB $62135596800, R3, R1 // 1be09ed23bf2aef2db01c0f261001bcb
CMP $3343198598084851058, R3 // 5bae8ed2db8daef23badcdf2bbcce5f27f001beb
ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b
// LTYPE1 imsr ',' spreg ','
// {
// outcode($1, &$2, $4, &nullgen);
// }
// LTYPE1 imsr ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
ADDW $1, R2
ADDW R1, R2
ADD $1, R2
ADD R1, R2
ADD R1>>11, R2
ADD R1<<22, R2
ADD R1->33, R2
AND R1@>33, R2
// encryption
SHA256H V9.S4, V3, V2 // 6240095e
SHA256H2 V9.S4, V4, V3 // 8350095e
SHA256SU0 V8.S4, V7.S4 // 0729285e
SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e
SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e
SHA1SU1 V5.S4, V1.S4 // a118285e
SHA1C V1.S4, V2, V3 // 4300015e
SHA1H V5, V4 // a408285e
SHA1M V8.S4, V7, V6 // e620085e
SHA1P V11.S4, V10, V9 // 49110b5e
SHA512H V2.D2, V1, V0 // 208062ce
SHA512H2 V4.D2, V3, V2 // 628464ce
SHA512SU0 V9.D2, V8.D2 // 2881c0ce
SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce
VRAX1 V26.D2, V29.D2, V30.D2 // be8f7ace
VXAR $63, V27.D2, V21.D2, V26.D2 // bafe9bce
VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e
VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e
VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e
VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e
VEOR3 V2.B16, V7.B16, V12.B16, V25.B16 // 990907ce
VBCAX V1.B16, V2.B16, V26.B16, V31.B16 // 5f0722ce
VREV32 V5.B16, V5.B16 // a508206e
VREV64 V2.S2, V3.S2 // 4308a00e
VREV64 V2.S4, V3.S4 // 4308a04e
// logical ops
//
// make sure constants get encoded into an instruction when it could
AND R1@>33, R2
AND $(1<<63), R1 // AND $-9223372036854775808, R1 // 21004192
AND $(1<<63-1), R1 // AND $9223372036854775807, R1 // 21f84092
ORR $(1<<63), R1 // ORR $-9223372036854775808, R1 // 210041b2
ORR $(1<<63-1), R1 // ORR $9223372036854775807, R1 // 21f840b2
EOR $(1<<63), R1 // EOR $-9223372036854775808, R1 // 210041d2
EOR $(1<<63-1), R1 // EOR $9223372036854775807, R1 // 21f840d2
ANDW $0x3ff00000, R2 // ANDW $1072693248, R2 // 42240c12
BICW $0x3ff00000, R2 // BICW $1072693248, R2 // 42540212
ORRW $0x3ff00000, R2 // ORRW $1072693248, R2 // 42240c32
ORNW $0x3ff00000, R2 // ORNW $1072693248, R2 // 42540232
EORW $0x3ff00000, R2 // EORW $1072693248, R2 // 42240c52
EONW $0x3ff00000, R2 // EONW $1072693248, R2 // 42540252
AND $0x22220000, R3, R4 // AND $572653568, R3, R4 // 5b44a4d264001b8a
ORR $0x22220000, R3, R4 // ORR $572653568, R3, R4 // 5b44a4d264001baa
EOR $0x22220000, R3, R4 // EOR $572653568, R3, R4 // 5b44a4d264001bca
@ -287,7 +284,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
EON $0x22220000, R3, R4 // EON $572653568, R3, R4 // 5b44a4d264003bca
ANDS $0x22220000, R3, R4 // ANDS $572653568, R3, R4 // 5b44a4d264001bea
BICS $0x22220000, R3, R4 // BICS $572653568, R3, R4 // 5b44a4d264003bea
EOR $0xe03fffffffffffff, R20, R22 // EOR $-2287828610704211969, R20, R22 // 96e243d2
TSTW $0x600000006, R1 // TSTW $25769803782, R1 // 3f041f72
TST $0x4900000049, R0 // TST $313532612681, R0 // 3b0980d23b09c0f21f001bea
@ -316,19 +312,22 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
EONW $0x6006000060060, R5 // EONW $1689262177517664, R5 // 1b0c8052db00a072a5003b4a
ORNW $0x6006000060060, R5 // ORNW $1689262177517664, R5 // 1b0c8052db00a072a5003b2a
BICSW $0x6006000060060, R5 // BICSW $1689262177517664, R5 // 1b0c8052db00a072a5003b6a
ADDW $0x60060, R2 // ADDW $393312, R2 // 4280011142804111
CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b
// TODO: this could have better encoding
ANDW $-1, R10 // 1b0080124a011b0a
AND $8, R0, RSP // 1f007d92
ORR $8, R0, RSP // 1f007db2
EOR $8, R0, RSP // 1f007dd2
BIC $8, R0, RSP // 1ff87c92
ORN $8, R0, RSP // 1ff87cb2
EON $8, R0, RSP // 1ff87cd2
TST $15, R2 // 5f0c40f2
TST R1, R2 // 5f0001ea
TST R1->11, R2 // 5f2c81ea
TST R1>>22, R2 // 5f5841ea
TST R1<<33, R2 // 5f8401ea
TST $0x22220000, R3 // TST $572653568, R3 // 5b44a4d27f001bea
// move an immediate to a Rn.
MOVD $0x3fffffffc000, R0 // MOVD $70368744161280, R0 // e07f72b2
MOVW $1000000, R4 // 04488852e401a072
MOVW $0xaaaa0000, R1 // MOVW $2863267840, R1 // 4155b552
@ -348,46 +347,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $-1, R1 // 01008092
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92
MOVW $1, ZR
MOVW $1, R1
MOVD $1, ZR
MOVD $1, R1
MOVK $1, R1
// move a large constant to a Vd.
VMOVS $0x80402010, V11 // VMOVS $2151686160, V11
VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20
VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10
VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20
// mov(to/from sp)
MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // fb074091610b0091
MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // fb0740917f231c91
MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // fb08409161070091
MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // fbfc7f9161ff3f91
MOVD $-0x1(R7), R1 // MOVD $-1(R7), R1 // e10400d1
MOVD $-0x30(R7), R1 // MOVD $-48(R7), R1 // e1c000d1
MOVD $-0x708(R7), R1 // MOVD $-1800(R7), R1 // e1201cd1
MOVD $-0x2000(RSP), R1 // MOVD $-8192(RSP), R1 // e10b40d1
MOVD $-0x10000(RSP), RSP // MOVD $-65536(RSP), RSP // ff4340d1
//
// CLS
//
// LTYPE2 imsr ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
CLSW R1, R2
CLS R1, R2
//
// MOV
//
// LTYPE3 addr ',' addr
// {
// outcode($1, &$2, NREG, &$4);
// }
MOVW R1, R2
MOVW ZR, R1
MOVW R1, ZR
MOVW $1, ZR
MOVW $1, R1
MOVW ZR, (R1)
MOVD R1, R2
MOVD ZR, R1
MOVD $1, ZR
MOVD $1, R1
MOVD ZR, (R1)
// store and load
//
// LD1/ST1
VLD1 (R8), [V1.B16, V2.B16] // 01a1404c
VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c
VLD1.P (R8)(R20), [V21.B16, V22.B16] // VLD1.P (R8)(R20*1), [V21.B16,V22.B16] // 15a1d44c
@ -445,34 +435,21 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VST4 [V22.D2, V23.D2, V24.D2, V25.D2], (R3) // 760c004c
VST4.P [V14.D2, V15.D2, V16.D2, V17.D2], 64(R15) // ee0d9f4c
VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23) // VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23*1) // 7800970c
FMOVS F20, (R0) // 140000bd
// pre/post-indexed
FMOVS.P F20, 4(R0) // 144400bc
FMOVS.W F20, 4(R0) // 144c00bc
FMOVS (R0), F20 // 140040bd
FMOVD.P F20, 8(R1) // 348400fc
FMOVQ.P F13, 11(R10) // 4db5803c
FMOVQ.W F15, 11(R20) // 8fbe803c
FMOVS.P 8(R0), F20 // 148440bc
FMOVS.W 8(R0), F20 // 148c40bc
FMOVD F20, (R2) // 540000fd
FMOVD.P F20, 8(R1) // 348400fc
FMOVD.W 8(R1), F20 // 348c40fc
PRFM (R2), PLDL1KEEP // 400080f9
PRFM 16(R2), PLDL1KEEP // 400880f9
PRFM 48(R6), PSTL2STRM // d31880f9
PRFM 8(R12), PLIL3STRM // 8d0580f9
PRFM (R8), $25 // 190180f9
PRFM 8(R9), $30 // 3e0580f9
FMOVQ.P 11(R10), F13 // 4db5c03c
FMOVQ.W 11(R20), F15 // 8fbec03c
// small offset fits into instructions
MOVB 1(R1), R2 // 22048039
MOVH 1(R1), R2 // 22108078
MOVH 2(R1), R2 // 22048079
MOVW 1(R1), R2 // 221080b8
MOVW 4(R1), R2 // 220480b9
MOVD 1(R1), R2 // 221040f8
MOVD 8(R1), R2 // 220440f9
FMOVS 1(R1), F2 // 221040bc
FMOVS 4(R1), F2 // 220440bd
FMOVD 1(R1), F2 // 221040fc
FMOVD 8(R1), F2 // 220440fd
// small offset fits into instructions
MOVB R1, 1(R2) // 41040039
MOVH R1, 1(R2) // 41100078
MOVH R1, 2(R2) // 41040079
@ -480,18 +457,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVW R1, 4(R2) // 410400b9
MOVD R1, 1(R2) // 411000f8
MOVD R1, 8(R2) // 410400f9
MOVD ZR, (R1)
MOVW ZR, (R1)
FMOVS F1, 1(R2) // 411000bc
FMOVS F1, 4(R2) // 410400bd
FMOVS F20, (R0) // 140000bd
FMOVD F1, 1(R2) // 411000fc
FMOVD F1, 8(R2) // 410400fd
FMOVD F20, (R2) // 540000fd
FMOVQ F0, 32(R5) // a008803d
FMOVQ F10, 65520(R10) // 4afdbf3d
FMOVQ F11, 64(RSP) // eb13803d
FMOVQ F11, 8(R20) // 8b82803c
FMOVQ F11, 4(R20) // 8b42803c
// large aligned offset, use two instructions
MOVB 0x1001(R1), R2 // MOVB 4097(R1), R2 // 3b04409162078039
MOVH 0x2002(R1), R2 // MOVH 8194(R1), R2 // 3b08409162078079
MOVW 0x4004(R1), R2 // MOVW 16388(R1), R2 // 3b104091620780b9
MOVD 0x8008(R1), R2 // MOVD 32776(R1), R2 // 3b204091620740f9
FMOVS 0x4004(R1), F2 // FMOVS 16388(R1), F2 // 3b104091620740bd
FMOVD 0x8008(R1), F2 // FMOVD 32776(R1), F2 // 3b204091620740fd
MOVB 1(R1), R2 // 22048039
MOVH 1(R1), R2 // 22108078
MOVH 2(R1), R2 // 22048079
MOVW 1(R1), R2 // 221080b8
MOVW 4(R1), R2 // 220480b9
MOVD 1(R1), R2 // 221040f8
MOVD 8(R1), R2 // 220440f9
FMOVS (R0), F20 // 140040bd
FMOVS 1(R1), F2 // 221040bc
FMOVS 4(R1), F2 // 220440bd
FMOVD 1(R1), F2 // 221040fc
FMOVD 8(R1), F2 // 220440fd
FMOVQ 32(R5), F2 // a208c03d
FMOVQ 65520(R10), F10 // 4afdff3d
FMOVQ 64(RSP), F11 // eb13c03d
// large aligned offset, use two instructions (add+ldr/store).
MOVB R1, 0x1001(R2) // MOVB R1, 4097(R2) // 5b04409161070039
MOVH R1, 0x2002(R2) // MOVH R1, 8194(R2) // 5b08409161070079
MOVW R1, 0x4004(R2) // MOVW R1, 16388(R2) // 5b104091610700b9
@ -499,15 +495,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x4004(R2) // FMOVS F1, 16388(R2) // 5b104091610700bd
FMOVD F1, 0x8008(R2) // FMOVD F1, 32776(R2) // 5b204091610700fd
// very large or unaligned offset uses constant pool
// the encoding cannot be checked as the address of the constant pool is unknown.
// here we only test that they can be assembled.
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
MOVH 0x44332211(R1), R2 // MOVH 1144201745(R1), R2
MOVW 0x44332211(R1), R2 // MOVW 1144201745(R1), R2
MOVD 0x44332211(R1), R2 // MOVD 1144201745(R1), R2
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
MOVB 0x1001(R1), R2 // MOVB 4097(R1), R2 // 3b04409162078039
MOVH 0x2002(R1), R2 // MOVH 8194(R1), R2 // 3b08409162078079
MOVW 0x4004(R1), R2 // MOVW 16388(R1), R2 // 3b104091620780b9
MOVD 0x8008(R1), R2 // MOVD 32776(R1), R2 // 3b204091620740f9
FMOVS 0x4004(R1), F2 // FMOVS 16388(R1), F2 // 3b104091620740bd
FMOVD 0x8008(R1), F2 // FMOVD 32776(R1), F2 // 3b204091620740fd
// very large or unaligned offset uses constant pool.
// the encoding cannot be checked as the address of the constant pool is unknown.
// here we only test that they can be assembled.
MOVB R1, 0x44332211(R2) // MOVB R1, 1144201745(R2)
MOVH R1, 0x44332211(R2) // MOVH R1, 1144201745(R2)
MOVW R1, 0x44332211(R2) // MOVW R1, 1144201745(R2)
@ -515,14 +512,59 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2)
FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2)
//
// MOVK
//
// LMOVK imm ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
MOVK $1, R1
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
MOVH 0x44332211(R1), R2 // MOVH 1144201745(R1), R2
MOVW 0x44332211(R1), R2 // MOVW 1144201745(R1), R2
MOVD 0x44332211(R1), R2 // MOVD 1144201745(R1), R2
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
// shifted or extended register offset.
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
MOVWU (R19)(R20<<2), R20 // 747a74b8
MOVD (R2)(R6<<3), R4 // 447866f8
MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8
MOVWU (R5)(R4.UXTW), R10 // aa4864b8
MOVBU (R3)(R9.UXTW), R8 // 68486938
MOVBU (R5)(R8), R10 // MOVBU (R5)(R8*1), R10 // aa686838
MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778
MOVHU (R1)(R2<<1), R5 // 25786278
MOVB (R9)(R3.UXTW), R6 // 2649a338
MOVB (R10)(R6), R15 // MOVB (R10)(R6*1), R15 // 4f69a638
MOVB (R29)(R30<<0), R14 // ae7bbe38
MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38
MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778
MOVH (R8)(R4<<1), R10 // 0a79a478
MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8
MOVW (R1)(R4.SXTX), R11 // 2be8a4b8
MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8
MOVW (R2)(R5), R12 // MOVW (R2)(R5*1), R12 // 4c68a5b8
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
FMOVD (R2)(R6<<3), F4 // 447866fc
MOVD R5, (R2)(R6<<3) // 457826f8
MOVD R9, (R6)(R7.SXTX<<3) // c9f827f8
MOVD ZR, (R6)(R7.SXTX<<3) // dff827f8
MOVW R8, (R2)(R3.UXTW<<2) // 485823b8
MOVW R7, (R3)(R4.SXTW) // 67c824b8
MOVB R4, (R2)(R6.SXTX) // 44e82638
MOVB R8, (R3)(R9.UXTW) // 68482938
MOVB R10, (R5)(R8) // MOVB R10, (R5)(R8*1) // aa682838
MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778
MOVH R5, (R1)(R2<<1) // 25782278
MOVH R7, (R2)(R5.SXTX<<1) // 47f82578
MOVH R8, (R3)(R6.UXTW) // 68482678
MOVB R4, (R2)(R6.SXTX) // 44e82638
FMOVS F4, (R2)(R6) // FMOVS F4, (R2)(R6*1) // 446826bc
FMOVS F4, (R2)(R6<<2) // 447826bc
FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc
FMOVD F4, (R2)(R6<<3) // 447826fc
// vmov
VMOV V8.S[1], R1 // 013d0c0e
VMOV V0.D[0], R11 // 0b3c084e
VMOV V0.D[1], R11 // 0b3c184e
@ -537,205 +579,28 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VMOV V9.H[0], V12.H[1] // 2c05066e
VMOV V8.B[0], V12.B[1] // 0c05036e
VMOV V8.B[7], V4.B[8] // 043d116e
VREV32 V5.B16, V5.B16 // a508206e
VREV64 V2.S2, V3.S2 // 4308a00e
VREV64 V2.S4, V3.S4 // 4308a04e
VDUP V19.S[0], V17.S4 // 7106044e
//
// B/BL
//
// LTYPE4 comma rel
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BL 1(PC) // CALL 1(PC)
// LTYPE4 comma nireg
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BL (R2) // CALL (R2)
BL foo(SB) // CALL foo(SB)
BL bar<>(SB) // CALL bar<>(SB)
//
// BEQ
//
// LTYPE5 comma rel
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BEQ 1(PC)
//
// SVC
//
// LTYPE6
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
SVC
//
// CMP
//
// LTYPE7 imsr ',' spreg comma
// {
// outcode($1, &$2, $4, &nullgen);
// }
CMP $3, R2
CMP R1, R2
CMP R1->11, R2
CMP R1>>22, R2
CMP R1<<33, R2
CMP R22.SXTX, RSP // ffe336eb
CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
// TST
TST $15, R2 // 5f0c40f2
TST R1, R2 // 5f0001ea
TST R1->11, R2 // 5f2c81ea
TST R1>>22, R2 // 5f5841ea
TST R1<<33, R2 // 5f8401ea
TST $0x22220000, R3 // TST $572653568, R3 // 5b44a4d27f001bea
//
// CBZ
//
// LTYPE8 reg ',' rel
// {
// outcode($1, &$2, NREG, &$4);
// }
again:
CBZ R1, again // CBZ R1
//
// CSET
//
// LTYPER cond ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
// conditional operations
CSET GT, R1 // e1d79f9a
CSETW HI, R2 // e2979f1a
//
// CSEL/CSINC/CSNEG/CSINV
//
// LTYPES cond ',' reg ',' reg ',' reg
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
CSEL LT, R1, R2, ZR // 3fb0829a
CSELW LT, R2, R3, R4 // 44b0831a
CSINC GT, R1, ZR, R3 // 23c49f9a
CSNEG MI, R1, R2, R3 // 234482da
CSINV CS, R1, R2, R3 // CSINV HS, R1, R2, R3 // 232082da
CSINVW MI, R2, ZR, R2 // 42409f5a
// LTYPES cond ',' reg ',' reg
// {
// outcode($1, &$2, $4.reg, &$6);
// }
CINC EQ, R4, R9 // 8914849a
CINCW PL, R2, ZR // 5f44821a
CINV PL, R11, R22 // 76418bda
CINVW LS, R7, R13 // ed80875a
CNEG LS, R13, R7 // a7858dda
CNEGW EQ, R8, R13 // 0d15885a
//
// CCMN
//
// LTYPEU cond ',' imsr ',' reg ',' imm comma
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
CCMN MI, ZR, R1, $4 // e44341ba
//
// FADDD
//
// LTYPEK frcon ',' freg
// {
// outcode($1, &$2, NREG, &$4);
// }
// FADDD $0.5, F1 // FADDD $(0.5), F1
FADDD F1, F2
// LTYPEK frcon ',' freg ',' freg
// {
// outcode($1, &$2, $4.reg, &$6);
// }
// FADDD $0.7, F1, F2 // FADDD $(0.69999999999999996), F1, F2
FADDD F1, F2, F3
//
// FCMP
//
// LTYPEL frcon ',' freg comma
// {
// outcode($1, &$2, $4.reg, &nullgen);
// }
// FCMP $0.2, F1
// FCMP F1, F2
//
// FCCMP
//
// LTYPEF cond ',' freg ',' freg ',' imm comma
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
FCCMPS LT, F1, F2, $1 // 41b4211e
//
// FMULA
//
// LTYPE9 freg ',' freg ',' freg ',' freg comma
// {
// outgcode($1, &$2, $4.reg, &$6, &$8);
// }
// FMULA F1, F2, F3, F4
//
// FCSEL
//
// LFCSEL cond ',' freg ',' freg ',' freg
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
//
// MADD Rn,Rm,Ra,Rd
//
// LTYPEM reg ',' reg ',' sreg ',' reg
// {
// outgcode($1, &$2, $6, &$4, &$8);
// }
// MADD R1, R2, R3, R4
FMADDS F1, F3, F2, F4 // 440c011f
FMADDD F4, F5, F4, F4 // 8414441f
FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f
FNMADDS F1, F3, F2, F4 // 440c211f
FNMADDD F1, F3, F2, F4 // 440c611f
FNMSUBS F1, F3, F2, F4 // 448c211f
FNMSUBD F1, F3, F2, F4 // 448c611f
// DMB, HINT
//
// LDMB imm
// {
// outcode($1, &$2, NREG, &nullgen);
// }
DMB $1
//
// STXR
//
// LSTXR reg ',' addr ',' reg
// {
// outcode($1, &$2, &$4, &$6);
// }
// atomic ops
LDARB (R25), R2 // 22ffdf08
LDARH (R5), R7 // a7fcdf48
LDAXPW (R10), (R20, R16) // 54c17f88
@ -816,38 +681,38 @@ again:
LDADDLH R5, (RSP), R7 // e7036578
LDADDLB R5, (R6), R7 // c7006538
LDADDLB R5, (RSP), R7 // e7036538
LDANDAD R5, (R6), R7 // c710a5f8
LDANDAD R5, (RSP), R7 // e713a5f8
LDANDAW R5, (R6), R7 // c710a5b8
LDANDAW R5, (RSP), R7 // e713a5b8
LDANDAH R5, (R6), R7 // c710a578
LDANDAH R5, (RSP), R7 // e713a578
LDANDAB R5, (R6), R7 // c710a538
LDANDAB R5, (RSP), R7 // e713a538
LDANDALD R5, (R6), R7 // c710e5f8
LDANDALD R5, (RSP), R7 // e713e5f8
LDANDALW R5, (R6), R7 // c710e5b8
LDANDALW R5, (RSP), R7 // e713e5b8
LDANDALH R5, (R6), R7 // c710e578
LDANDALH R5, (RSP), R7 // e713e578
LDANDALB R5, (R6), R7 // c710e538
LDANDALB R5, (RSP), R7 // e713e538
LDANDD R5, (R6), R7 // c71025f8
LDANDD R5, (RSP), R7 // e71325f8
LDANDW R5, (R6), R7 // c71025b8
LDANDW R5, (RSP), R7 // e71325b8
LDANDH R5, (R6), R7 // c7102578
LDANDH R5, (RSP), R7 // e7132578
LDANDB R5, (R6), R7 // c7102538
LDANDB R5, (RSP), R7 // e7132538
LDANDLD R5, (R6), R7 // c71065f8
LDANDLD R5, (RSP), R7 // e71365f8
LDANDLW R5, (R6), R7 // c71065b8
LDANDLW R5, (RSP), R7 // e71365b8
LDANDLH R5, (R6), R7 // c7106578
LDANDLH R5, (RSP), R7 // e7136578
LDANDLB R5, (R6), R7 // c7106538
LDANDLB R5, (RSP), R7 // e7136538
LDCLRAD R5, (R6), R7 // c710a5f8
LDCLRAD R5, (RSP), R7 // e713a5f8
LDCLRAW R5, (R6), R7 // c710a5b8
LDCLRAW R5, (RSP), R7 // e713a5b8
LDCLRAH R5, (R6), R7 // c710a578
LDCLRAH R5, (RSP), R7 // e713a578
LDCLRAB R5, (R6), R7 // c710a538
LDCLRAB R5, (RSP), R7 // e713a538
LDCLRALD R5, (R6), R7 // c710e5f8
LDCLRALD R5, (RSP), R7 // e713e5f8
LDCLRALW R5, (R6), R7 // c710e5b8
LDCLRALW R5, (RSP), R7 // e713e5b8
LDCLRALH R5, (R6), R7 // c710e578
LDCLRALH R5, (RSP), R7 // e713e578
LDCLRALB R5, (R6), R7 // c710e538
LDCLRALB R5, (RSP), R7 // e713e538
LDCLRD R5, (R6), R7 // c71025f8
LDCLRD R5, (RSP), R7 // e71325f8
LDCLRW R5, (R6), R7 // c71025b8
LDCLRW R5, (RSP), R7 // e71325b8
LDCLRH R5, (R6), R7 // c7102578
LDCLRH R5, (RSP), R7 // e7132578
LDCLRB R5, (R6), R7 // c7102538
LDCLRB R5, (RSP), R7 // e7132538
LDCLRLD R5, (R6), R7 // c71065f8
LDCLRLD R5, (RSP), R7 // e71365f8
LDCLRLW R5, (R6), R7 // c71065b8
LDCLRLW R5, (RSP), R7 // e71365b8
LDCLRLH R5, (R6), R7 // c7106578
LDCLRLH R5, (RSP), R7 // e7136578
LDCLRLB R5, (R6), R7 // c7106538
LDCLRLB R5, (RSP), R7 // e7136538
LDEORAD R5, (R6), R7 // c720a5f8
LDEORAD R5, (RSP), R7 // e723a5f8
LDEORAW R5, (R6), R7 // c720a5b8
@ -912,21 +777,36 @@ again:
LDORLH R5, (RSP), R7 // e7336578
LDORLB R5, (R6), R7 // c7306538
LDORLB R5, (RSP), R7 // e7336538
CASD R1, (R2), ZR // 5f7ca1c8
CASW R1, (RSP), ZR // ff7fa188
CASB ZR, (R5), R3 // a37cbf08
CASH R3, (RSP), ZR // ff7fa348
CASW R5, (R7), R6 // e67ca588
CASLD ZR, (RSP), R8 // e8ffbfc8
CASLW R9, (R10), ZR // 5ffda988
CASAD R7, (R11), R15 // 6f7de7c8
CASAW R10, (RSP), R19 // f37fea88
CASALD R5, (R6), R7 // c7fce5c8
CASALD R5, (RSP), R7 // e7ffe5c8
CASALW R5, (R6), R7 // c7fce588
CASALW R5, (RSP), R7 // e7ffe588
CASALH ZR, (R5), R8 // a8fcff48
CASALB R8, (R9), ZR // 3ffde808
CASPD (R30, ZR), (RSP), (R8, R9) // e87f3e48
CASPW (R6, R7), (R8), (R4, R5) // 047d2608
CASPD (R2, R3), (R2), (R8, R9) // 487c2248
// RET
//
// LTYPEA comma
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
BEQ 2(PC)
RET
RET foo(SB)
// More B/BL cases, and canonical names JMP, CALL.
BEQ 2(PC)
B foo(SB) // JMP foo(SB)
// B/BL/B.cond cases, and canonical names JMP, CALL.
BL 1(PC) // CALL 1(PC)
BL (R2) // CALL (R2)
BL foo(SB) // CALL foo(SB)
BL bar<>(SB) // CALL bar<>(SB)
B foo(SB) // JMP foo(SB)
BEQ 1(PC)
BEQ 2(PC)
TBZ $1, R1, 2(PC)
TBNZ $2, R2, 2(PC)
@ -1101,8 +981,6 @@ again:
FSTPS (F3, F4), 1024(RSP) // fb0310916313002d
FSTPS (F3, F4), x(SB)
FSTPS (F3, F4), x+8(SB)
NOOP // 1f2003d5
HINT $0 // 1f2003d5
// System Register
MSR $1, SPSel // bf4100d5
@ -1664,11 +1542,4 @@ again:
MSR R13, ZCR_EL1 // 0d1218d5
MRS ZCR_EL1, R23 // 171238d5
MSR R17, ZCR_EL1 // 111218d5
// END
//
// LTYPEE comma
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
END

View file

@ -87,13 +87,13 @@ TEXT errors(SB),$0
VLD1.P 32(R1), [V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset"
VLD1.P 48(R1), [V7.S4, V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset"
VPMULL V1.D1, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.H4, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.D2, V2.D2, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.B16, V2.B16, V3.H8 // ERROR "invalid arrangement"
VPMULL V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch"
VPMULL V1.D2, V2.D2, V3.Q1 // ERROR "operand mismatch"
VPMULL V1.B16, V2.B16, V3.H8 // ERROR "operand mismatch"
VPMULL2 V1.D2, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.H4, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.D1, V2.D1, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.B8, V2.B8, V3.H8 // ERROR "invalid arrangement"
VPMULL2 V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch"
VPMULL2 V1.D1, V2.D1, V3.Q1 // ERROR "operand mismatch"
VPMULL2 V1.B8, V2.B8, V3.H8 // ERROR "operand mismatch"
VEXT $8, V1.B16, V2.B8, V2.B16 // ERROR "invalid arrangement"
VEXT $8, V1.H8, V2.H8, V2.H8 // ERROR "invalid arrangement"
VRBIT V1.B16, V2.B8 // ERROR "invalid arrangement"
@ -123,14 +123,14 @@ TEXT errors(SB),$0
LDADDLW R5, (R6), ZR // ERROR "illegal destination register"
LDADDLH R5, (R6), ZR // ERROR "illegal destination register"
LDADDLB R5, (R6), ZR // ERROR "illegal destination register"
LDANDD R5, (R6), ZR // ERROR "illegal destination register"
LDANDW R5, (R6), ZR // ERROR "illegal destination register"
LDANDH R5, (R6), ZR // ERROR "illegal destination register"
LDANDB R5, (R6), ZR // ERROR "illegal destination register"
LDANDLD R5, (R6), ZR // ERROR "illegal destination register"
LDANDLW R5, (R6), ZR // ERROR "illegal destination register"
LDANDLH R5, (R6), ZR // ERROR "illegal destination register"
LDANDLB R5, (R6), ZR // ERROR "illegal destination register"
LDCLRD R5, (R6), ZR // ERROR "illegal destination register"
LDCLRW R5, (R6), ZR // ERROR "illegal destination register"
LDCLRH R5, (R6), ZR // ERROR "illegal destination register"
LDCLRB R5, (R6), ZR // ERROR "illegal destination register"
LDCLRLD R5, (R6), ZR // ERROR "illegal destination register"
LDCLRLW R5, (R6), ZR // ERROR "illegal destination register"
LDCLRLH R5, (R6), ZR // ERROR "illegal destination register"
LDCLRLB R5, (R6), ZR // ERROR "illegal destination register"
LDEORD R5, (R6), ZR // ERROR "illegal destination register"
LDEORW R5, (R6), ZR // ERROR "illegal destination register"
LDEORH R5, (R6), ZR // ERROR "illegal destination register"
@ -163,22 +163,22 @@ TEXT errors(SB),$0
LDADDLW R5, (R6), RSP // ERROR "illegal destination register"
LDADDLH R5, (R6), RSP // ERROR "illegal destination register"
LDADDLB R5, (R6), RSP // ERROR "illegal destination register"
LDANDAD R5, (R6), RSP // ERROR "illegal destination register"
LDANDAW R5, (R6), RSP // ERROR "illegal destination register"
LDANDAH R5, (R6), RSP // ERROR "illegal destination register"
LDANDAB R5, (R6), RSP // ERROR "illegal destination register"
LDANDALD R5, (R6), RSP // ERROR "illegal destination register"
LDANDALW R5, (R6), RSP // ERROR "illegal destination register"
LDANDALH R5, (R6), RSP // ERROR "illegal destination register"
LDANDALB R5, (R6), RSP // ERROR "illegal destination register"
LDANDD R5, (R6), RSP // ERROR "illegal destination register"
LDANDW R5, (R6), RSP // ERROR "illegal destination register"
LDANDH R5, (R6), RSP // ERROR "illegal destination register"
LDANDB R5, (R6), RSP // ERROR "illegal destination register"
LDANDLD R5, (R6), RSP // ERROR "illegal destination register"
LDANDLW R5, (R6), RSP // ERROR "illegal destination register"
LDANDLH R5, (R6), RSP // ERROR "illegal destination register"
LDANDLB R5, (R6), RSP // ERROR "illegal destination register"
LDCLRAD R5, (R6), RSP // ERROR "illegal destination register"
LDCLRAW R5, (R6), RSP // ERROR "illegal destination register"
LDCLRAH R5, (R6), RSP // ERROR "illegal destination register"
LDCLRAB R5, (R6), RSP // ERROR "illegal destination register"
LDCLRALD R5, (R6), RSP // ERROR "illegal destination register"
LDCLRALW R5, (R6), RSP // ERROR "illegal destination register"
LDCLRALH R5, (R6), RSP // ERROR "illegal destination register"
LDCLRALB R5, (R6), RSP // ERROR "illegal destination register"
LDCLRD R5, (R6), RSP // ERROR "illegal destination register"
LDCLRW R5, (R6), RSP // ERROR "illegal destination register"
LDCLRH R5, (R6), RSP // ERROR "illegal destination register"
LDCLRB R5, (R6), RSP // ERROR "illegal destination register"
LDCLRLD R5, (R6), RSP // ERROR "illegal destination register"
LDCLRLW R5, (R6), RSP // ERROR "illegal destination register"
LDCLRLH R5, (R6), RSP // ERROR "illegal destination register"
LDCLRLB R5, (R6), RSP // ERROR "illegal destination register"
LDEORAD R5, (R6), RSP // ERROR "illegal destination register"
LDEORAW R5, (R6), RSP // ERROR "illegal destination register"
LDEORAH R5, (R6), RSP // ERROR "illegal destination register"
@ -353,4 +353,12 @@ TEXT errors(SB),$0
VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range"
VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch"
VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement"
VUADDW V9.B8, V12.H8, V14.B8 // ERROR "invalid arrangement"
VUADDW2 V9.B8, V12.S4, V14.S4 // ERROR "operand mismatch"
VSLI $64, V7.D2, V8.D2 // ERROR "shift out of range"
VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range"
CASPD (R3, R4), (R2), (R8, R9) // ERROR "source register pair must start from even register"
CASPD (R2, R3), (R2), (R9, R10) // ERROR "destination register pair must start from even register"
CASPD (R2, R4), (R2), (R8, R9) // ERROR "source register pair must be contiguous"
CASPD (R2, R3), (R2), (R8, R10) // ERROR "destination register pair must be contiguous"
RET

View file

@ -282,7 +282,9 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
RLWMI $7, R3, $65535, R6 // 50663c3e
RLWMICC $7, R3, $65535, R6 // 50663c3f
RLWNM $3, R4, $7, R6 // 54861f7e
RLWNM R3, R4, $7, R6 // 5c861f7e
RLWNMCC $3, R4, $7, R6 // 54861f7f
RLWNMCC R3, R4, $7, R6 // 5c861f7f
RLDMI $0, R4, $7, R6 // 7886076c
RLDMICC $0, R4, $7, R6 // 7886076d
RLDIMI $0, R4, $7, R6 // 788601cc

View file

@ -297,6 +297,13 @@ start:
MOVW X5, (X6) // 23205300
MOVW X5, 4(X6) // 23225300
MOVB X5, X6 // 1393820313538343
MOVH X5, X6 // 1393020313530343
MOVW X5, X6 // 1b830200
MOVBU X5, X6 // 13f3f20f
MOVHU X5, X6 // 1393020313530303
MOVWU X5, X6 // 1393020213530302
MOVF 4(X5), F0 // 07a04200
MOVF F0, 4(X5) // 27a20200
MOVF F0, F1 // d3000020
@ -318,7 +325,7 @@ start:
// These jumps can get printed as jumps to 2 because they go to the
// second instruction in the function (the first instruction is an
// invisible stack pointer adjustment).
JMP start // JMP 2 // 6ff01fc5
JMP start // JMP 2 // 6ff09fc2
JMP (X5) // 67800200
JMP 4(X5) // 67804200
@ -331,16 +338,16 @@ start:
JMP asmtest(SB) // 970f0000
// Branch pseudo-instructions
BEQZ X5, start // BEQZ X5, 2 // e38a02c2
BGEZ X5, start // BGEZ X5, 2 // e3d802c2
BGT X5, X6, start // BGT X5, X6, 2 // e3c662c2
BGTU X5, X6, start // BGTU X5, X6, 2 // e3e462c2
BGTZ X5, start // BGTZ X5, 2 // e34250c2
BLE X5, X6, start // BLE X5, X6, 2 // e3d062c2
BLEU X5, X6, start // BLEU X5, X6, 2 // e3fe62c0
BLEZ X5, start // BLEZ X5, 2 // e35c50c0
BLTZ X5, start // BLTZ X5, 2 // e3ca02c0
BNEZ X5, start // BNEZ X5, 2 // e39802c0
BEQZ X5, start // BEQZ X5, 2 // e38602c0
BGEZ X5, start // BGEZ X5, 2 // e3d402c0
BGT X5, X6, start // BGT X5, X6, 2 // e3c262c0
BGTU X5, X6, start // BGTU X5, X6, 2 // e3e062c0
BGTZ X5, start // BGTZ X5, 2 // e34e50be
BLE X5, X6, start // BLE X5, X6, 2 // e3dc62be
BLEU X5, X6, start // BLEU X5, X6, 2 // e3fa62be
BLEZ X5, start // BLEZ X5, 2 // e35850be
BLTZ X5, start // BLTZ X5, 2 // e3c602be
BNEZ X5, start // BNEZ X5, 2 // e39402be
// Set pseudo-instructions
SEQZ X15, X15 // 93b71700

View file

@ -24,6 +24,7 @@ var (
SymABIs = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble")
Importpath = flag.String("p", "", "set expected package import to path")
Spectre = flag.String("spectre", "", "enable spectre mitigations in `list` (all, ret)")
CompilingRuntime = flag.Bool("compiling-runtime", false, "source to be compiled is part of the Go runtime")
)
var (

View file

@ -52,7 +52,6 @@ func main() {
case "all", "ret":
ctxt.Retpoline = true
}
compilingRuntime := objabi.IsRuntimePackagePath(*flags.Importpath)
ctxt.Bso = bufio.NewWriter(os.Stdout)
defer ctxt.Bso.Flush()
@ -75,7 +74,8 @@ func main() {
var failedFile string
for _, f := range flag.Args() {
lexer := lex.NewLexer(f)
parser := asm.NewParser(ctxt, architecture, lexer, compilingRuntime)
parser := asm.NewParser(ctxt, architecture, lexer,
*flags.CompilingRuntime)
ctxt.DiagFunc = func(format string, args ...interface{}) {
diag = true
log.Printf(format, args...)

View file

@ -22,21 +22,6 @@ func usage() {
var wflag = flag.Bool("w", false, "write build ID")
// taken from cmd/go/internal/work/buildid.go
func hashToString(h [32]byte) string {
const b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
const chunks = 5
var dst [chunks * 4]byte
for i := 0; i < chunks; i++ {
v := uint32(h[3*i])<<16 | uint32(h[3*i+1])<<8 | uint32(h[3*i+2])
dst[4*i+0] = b64[(v>>18)&0x3F]
dst[4*i+1] = b64[(v>>12)&0x3F]
dst[4*i+2] = b64[(v>>6)&0x3F]
dst[4*i+3] = b64[v&0x3F]
}
return string(dst[:])
}
func main() {
log.SetPrefix("buildid: ")
log.SetFlags(0)
@ -63,12 +48,12 @@ func main() {
log.Fatal(err)
}
matches, hash, err := buildid.FindAndHash(f, id, 0)
f.Close()
if err != nil {
log.Fatal(err)
}
f.Close()
newID := id[:strings.LastIndex(id, "/")] + "/" + hashToString(hash)
newID := id[:strings.LastIndex(id, "/")] + "/" + buildid.HashToString(hash)
if len(newID) != len(id) {
log.Fatalf("%s: build ID length mismatch %q vs %q", file, id, newID)
}

View file

@ -13,7 +13,6 @@ import (
"go/scanner"
"go/token"
"os"
"path/filepath"
"strings"
)
@ -44,14 +43,7 @@ func sourceLine(n ast.Node) int {
// attached to the import "C" comment, a list of references to C.xxx,
// a list of exported functions, and the actual AST, to be rewritten and
// printed.
func (f *File) ParseGo(name string, src []byte) {
// Create absolute path for file, so that it will be used in error
// messages and recorded in debug line number information.
// This matches the rest of the toolchain. See golang.org/issue/5122.
if aname, err := filepath.Abs(name); err == nil {
name = aname
}
func (f *File) ParseGo(abspath string, src []byte) {
// Two different parses: once with comments, once without.
// The printer is not good enough at printing comments in the
// right place when we start editing the AST behind its back,
@ -60,8 +52,8 @@ func (f *File) ParseGo(name string, src []byte) {
// and reprinting.
// In cgo mode, we ignore ast2 and just apply edits directly
// the text behind ast1. In godefs mode we modify and print ast2.
ast1 := parse(name, src, parser.ParseComments)
ast2 := parse(name, src, 0)
ast1 := parse(abspath, src, parser.ParseComments)
ast2 := parse(abspath, src, 0)
f.Package = ast1.Name.Name
f.Name = make(map[string]*Name)
@ -88,7 +80,7 @@ func (f *File) ParseGo(name string, src []byte) {
cg = d.Doc
}
if cg != nil {
f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), name)
f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), abspath)
f.Preamble += commentText(cg) + "\n"
f.Preamble += "#line 1 \"cgo-generated-wrapper\"\n"
}

View file

@ -721,7 +721,7 @@ linkage to the desired libraries. The main function is provided by
_cgo_main.c:
int main() { return 0; }
void crosscall2(void(*fn)(void*, int, uintptr_t), void *a, int c, uintptr_t ctxt) { }
void crosscall2(void(*fn)(void*), void *a, int c, uintptr_t ctxt) { }
uintptr_t _cgo_wait_runtime_init_done(void) { return 0; }
void _cgo_release_context(uintptr_t ctxt) { }
char* _cgo_topofstack(void) { return (char*)0; }

View file

@ -16,7 +16,7 @@ import (
)
// godefs returns the output for -godefs mode.
func (p *Package) godefs(f *File, srcfile string) string {
func (p *Package) godefs(f *File) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by cmd/cgo -godefs; DO NOT EDIT.\n")

View file

@ -243,6 +243,8 @@ var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used wit
var gccgoMangler func(string) string
var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths")
var goarch, goos string
func main() {
@ -322,6 +324,13 @@ func main() {
input = filepath.Join(*srcDir, input)
}
// Create absolute path for file, so that it will be used in error
// messages and recorded in debug line number information.
// This matches the rest of the toolchain. See golang.org/issue/5122.
if aname, err := filepath.Abs(input); err == nil {
input = aname
}
b, err := ioutil.ReadFile(input)
if err != nil {
fatalf("%s", err)
@ -330,6 +339,10 @@ func main() {
fatalf("%s", err)
}
// Apply trimpath to the file path. The path won't be read from after this point.
input, _ = objabi.ApplyRewrites(input, *trimpath)
goFiles[i] = input
f := new(File)
f.Edit = edit.NewBuffer(b)
f.ParseGo(input, b)
@ -367,7 +380,7 @@ func main() {
p.PackagePath = f.Package
p.Record(f)
if *godefs {
os.Stdout.WriteString(p.godefs(f, input))
os.Stdout.WriteString(p.godefs(f))
} else {
p.writeOutput(f, input)
}

View file

@ -59,14 +59,14 @@ func (p *Package) writeDefs() {
// Write C main file for using gcc to resolve imports.
fmt.Fprintf(fm, "int main() { return 0; }\n")
if *importRuntimeCgo {
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void) { return 0; }\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
} else {
// If we're not importing runtime/cgo, we *are* runtime/cgo,
// which provides these functions. We just need a prototype.
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
}
@ -852,7 +852,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int, __SIZE_TYPE__), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern __SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fgcc, "extern void _cgo_release_context(__SIZE_TYPE__);\n\n")
fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
@ -862,59 +862,48 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
for _, exp := range p.ExpFunc {
fn := exp.Func
// Construct a gcc struct matching the gc argument and
// result frame. The gcc struct will be compiled with
// __attribute__((packed)) so all padding must be accounted
// for explicitly.
// Construct a struct that will be used to communicate
// arguments from C to Go. The C and Go definitions
// just have to agree. The gcc struct will be compiled
// with __attribute__((packed)) so all padding must be
// accounted for explicitly.
ctype := "struct {\n"
gotype := new(bytes.Buffer)
fmt.Fprintf(gotype, "struct {\n")
off := int64(0)
npad := 0
if fn.Recv != nil {
t := p.cgoType(fn.Recv.List[0].Type)
ctype += fmt.Sprintf("\t\t%s recv;\n", t.C)
argField := func(typ ast.Expr, namePat string, args ...interface{}) {
name := fmt.Sprintf(namePat, args...)
t := p.cgoType(typ)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s %s;\n", t.C, name)
fmt.Fprintf(gotype, "\t\t%s ", name)
noSourceConf.Fprint(gotype, fset, typ)
fmt.Fprintf(gotype, "\n")
off += t.Size
}
if fn.Recv != nil {
argField(fn.Recv.List[0].Type, "recv")
}
fntype := fn.Type
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s p%d;\n", t.C, i)
off += t.Size
argField(atype, "p%d", i)
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s r%d;\n", t.C, i)
off += t.Size
argField(atype, "r%d", i)
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
if ctype == "struct {\n" {
ctype += "\t\tchar unused;\n" // avoid empty struct
}
ctype += "\t}"
fmt.Fprintf(gotype, "\t}")
// Get the return type of the wrapper function
// compiled by gcc.
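For orientation, here is a minimal hand-written sketch of the glue this rewritten writeExports produces, assuming a hypothetical exported function Add(x, y int) int; it is illustrative only, not verbatim cgo output, and the real wrapper name carries a package-specific prefix rather than "example". Arguments and results now travel in a single struct whose Go and C definitions merely have to agree, and the Go-side wrapper emitted further below unpacks that struct and calls the exported function directly, instead of bouncing through the old _cgoexpwrap helper and _cgo_runtime_cgocallback.

func Add(x, y int) int { return x + y }

// Rough shape of the generated Go wrapper (a sketch, not generated verbatim).
func _cgoexp_example_Add(a *struct {
	p0 int
	p1 int
	r0 int
}) {
	// Unpack the packed argument struct, call the Go function, and write the
	// result back so the C side can read it after crosscall2 returns.
	a.r0 = Add(a.p0, a.p1)
}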
@ -939,7 +928,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
}
// Build the wrapper function compiled by gcc.
s := fmt.Sprintf("%s %s(", gccResult, exp.ExpName)
gccExport := ""
if goos == "windows" {
gccExport = "__declspec(dllexport)"
}
s := fmt.Sprintf("%s %s %s(", gccExport, gccResult, exp.ExpName)
if fn.Recv != nil {
s += p.cgoType(fn.Recv.List[0].Type).C.String()
s += " recv"
@ -961,12 +954,15 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
}
fmt.Fprintf(fgcch, "extern %s;\n", s)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int, __SIZE_TYPE__);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
fmt.Fprintf(fgcc, "\n%s\n", s)
fmt.Fprintf(fgcc, "{\n")
fmt.Fprintf(fgcc, "\t__SIZE_TYPE__ _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t%s %v _cgo_a;\n", ctype, p.packedAttribute())
// The results part of the argument structure must be
// initialized to 0 so the write barriers generated by
// the assignments to these fields in Go are safe.
fmt.Fprintf(fgcc, "\t%s %v _cgo_a = {0};\n", ctype, p.packedAttribute())
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
}
@ -995,82 +991,28 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgcc, "}\n")
// Build the wrapper function compiled by cmd/compile.
goname := "_cgoexpwrap" + cPrefix + "_"
if fn.Recv != nil {
goname += fn.Recv.List[0].Names[0].Name + "_"
}
goname += exp.Func.Name.Name
// This unpacks the argument struct above and calls the Go function.
fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName)
fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32, ctxt uintptr) {\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
// The indirect here is converting from a Go function pointer to a C function pointer.
fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n), ctxt);\n")
fmt.Fprintf(fgo2, "}\n")
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a *%s) {\n", cPrefix, exp.ExpName, gotype)
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprintf(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goname)
comma := false
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
comma = true
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if comma {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
comma = true
})
fmt.Fprintf(fgo2, ")")
if gccResult != "void" {
fmt.Fprint(fgo2, " (")
// Write results back to frame.
fmt.Fprintf(fgo2, "\t")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "r%d ", i)
printer.Fprint(fgo2, fset, atype)
fmt.Fprintf(fgo2, "a.r%d", i)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
if gccResult == "void" {
fmt.Fprint(fgo2, "\t")
} else {
// Verify that any results don't contain any
// Go pointers.
addedDefer := false
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
if !addedDefer {
fmt.Fprint(fgo2, "\tdefer func() {\n")
addedDefer = true
}
fmt.Fprintf(fgo2, "\t\t_cgoCheckResult(r%d)\n", i)
})
if addedDefer {
fmt.Fprint(fgo2, "\t}()\n")
}
fmt.Fprint(fgo2, "\treturn ")
fmt.Fprintf(fgo2, " = ")
}
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv.")
fmt.Fprintf(fgo2, "a.recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
@ -1078,9 +1020,20 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
fmt.Fprintf(fgo2, "a.p%d", i)
})
fmt.Fprint(fgo2, ")\n")
if gccResult != "void" {
// Verify that any results don't contain any
// Go pointers.
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i)
})
}
fmt.Fprint(fgo2, "}\n")
}
@ -1578,9 +1531,6 @@ const goProlog = `
//go:linkname _cgo_runtime_cgocall runtime.cgocall
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, interface{})

View file

@ -105,10 +105,8 @@ var knownFormats = map[string]string{
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocPair %s": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
"cmd/compile/internal/ssa.LocalSlot %v": "",
"cmd/compile/internal/ssa.Location %T": "",
"cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",

View file

@ -42,10 +42,11 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
if !t.IsFloat() {
switch t.Size() {
case 1:
return x86.AMOVBLZX
} else {
case 2:
return x86.AMOVWLZX
}
}
@ -1070,7 +1071,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
val := v.AuxInt
// 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
if val != 0 && val != 1 && val != 2 && val != 3 {
if val < 0 || val > 3 {
v.Fatalf("Invalid rounding mode")
}
p.From.Offset = val
@ -1210,7 +1211,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG

View file

@ -581,6 +581,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
if v.Op == ssa.OpARM64LoweredAtomicExchange32Variant {
swap = arm64.ASWPALW
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
// SWPALD Rarg1, (Rarg0), Rout
p := s.Prog(swap)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_MEM
p.To.Reg = r0
p.RegTo2 = out
case ssa.OpARM64LoweredAtomicAdd64,
ssa.OpARM64LoweredAtomicAdd32:
// LDAXR (Rarg0), Rout
@ -687,16 +705,74 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
gc.Patch(p2, p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
// Rarg1: old
// Rarg2: new
// MOV Rarg1, Rtmp
// CASAL Rtmp, (Rarg0), Rarg2
// CMP Rarg1, Rtmp
// CSET EQ, Rout
cas := arm64.ACASALD
cmp := arm64.ACMP
mov := arm64.AMOVD
if v.Op == ssa.OpARM64LoweredAtomicCas32Variant {
cas = arm64.ACASALW
cmp = arm64.ACMPW
mov = arm64.AMOVW
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg()
out := v.Reg0()
// MOV Rarg1, Rtmp
p := s.Prog(mov)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
// CASAL Rtmp, (Rarg0), Rarg2
p1 := s.Prog(cas)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = arm64.REGTMP
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
p1.RegTo2 = r2
// CMP Rarg1, Rtmp
p2 := s.Prog(cmp)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = r1
p2.Reg = arm64.REGTMP
// CSET EQ, Rout
p3 := s.Prog(arm64.ACSET)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.COND_EQ
p3.To.Type = obj.TYPE_REG
p3.To.Reg = out
case ssa.OpARM64LoweredAtomicAnd8,
ssa.OpARM64LoweredAtomicOr8:
// LDAXRB (Rarg0), Rout
ssa.OpARM64LoweredAtomicAnd32,
ssa.OpARM64LoweredAtomicOr8,
ssa.OpARM64LoweredAtomicOr32:
// LDAXRB/LDAXRW (Rarg0), Rout
// AND/OR Rarg1, Rout
// STLXRB Rout, (Rarg0), Rtmp
// STLXRB/STLXRW Rout, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
ld := arm64.ALDAXRB
st := arm64.ASTLXRB
if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
ld = arm64.ALDAXRW
st = arm64.ASTLXRW
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
p := s.Prog(arm64.ALDAXRB)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
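As a Go-level model of what the LoweredAtomicCas64Variant/Cas32Variant case above computes (a sketch for illustration, not compiler code; compareAndSwapAL is a hypothetical, non-atomic stand-in for the CASAL instruction, which always writes back the value it observed):

// compareAndSwapAL models CASAL: compare *addr with old, store new on a match,
// and in every case return the value that was read. Not actually atomic here.
func compareAndSwapAL(addr *uint64, old, new uint64) uint64 {
	observed := *addr
	if observed == old {
		*addr = new
	}
	return observed
}

// casViaCASAL mirrors the lowering: the boolean result of the Go-level CAS is
// recovered by comparing the observed value against the expected one
// (CMP Rarg1, Rtmp; CSET EQ, Rout).
func casViaCASAL(addr *uint64, old, new uint64) bool {
	return compareAndSwapAL(addr, old, new) == old
}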
@ -706,7 +782,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
p2 := s.Prog(arm64.ASTLXRB)
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM
@ -717,6 +793,63 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant {
atomic_clear = arm64.ALDCLRALB
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
// MVN Rarg1, Rtmp
p := s.Prog(arm64.AMVN)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
// LDCLRALW Rtmp, (Rarg0), Rout
p1 := s.Prog(atomic_clear)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = arm64.REGTMP
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
p1.RegTo2 = out
// AND Rarg1, Rout
p2 := s.Prog(arm64.AAND)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = r1
p2.To.Type = obj.TYPE_REG
p2.To.Reg = out
case ssa.OpARM64LoweredAtomicOr8Variant,
ssa.OpARM64LoweredAtomicOr32Variant:
atomic_or := arm64.ALDORALW
if v.Op == ssa.OpARM64LoweredAtomicOr8Variant {
atomic_or = arm64.ALDORALB
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
// LDORALW Rarg1, (Rarg0), Rout
p := s.Prog(atomic_or)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_MEM
p.To.Reg = r0
p.RegTo2 = out
// ORR Rarg1, Rout
p2 := s.Prog(arm64.AORR)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = r1
p2.To.Type = obj.TYPE_REG
p2.To.Reg = out
case ssa.OpARM64MOVBreg,
ssa.OpARM64MOVBUreg,
ssa.OpARM64MOVHreg,
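The LoweredAtomicAnd8Variant/And32Variant case above relies on ARMv8.1 providing an atomic load-clear (LDCLR) but no atomic load-and; a small Go model of the trick follows (a sketch under that assumption; loadClearAL is a hypothetical, non-atomic stand-in for LDCLRAL):

// loadClearAL models LDCLRAL: atomically clear in *addr the bits set in mask
// (*addr &^= mask) and return the old value. Not actually atomic here.
func loadClearAL(addr *uint32, mask uint32) uint32 {
	old := *addr
	*addr = old &^ mask
	return old
}

// atomicAndViaLDCLR mirrors the lowering: clearing the complement of val is the
// same as ANDing with val (MVN Rarg1, Rtmp; LDCLRALW Rtmp, (Rarg0), Rout), and
// one extra AND recovers the new value that the op must return.
func atomicAndViaLDCLR(addr *uint32, val uint32) uint32 {
	old := loadClearAL(addr, ^val)
	return old & val
}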

View file

@ -282,7 +282,7 @@ func genhash(t *types.Type) *obj.LSym {
}
sym := typesymprefix(".hash", t)
if Debug['r'] != 0 {
if Debug.r != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
@ -374,7 +374,7 @@ func genhash(t *types.Type) *obj.LSym {
r.List.Append(nh)
fn.Nbody.Append(r)
if Debug['r'] != 0 {
if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
}
@ -509,7 +509,7 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
sym := typesymprefix(".eq", t)
if Debug['r'] != 0 {
if Debug.r != 0 {
fmt.Printf("geneq %v\n", t)
}
@ -529,6 +529,10 @@ func geneq(t *types.Type) *obj.LSym {
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
nr := asNode(tfn.Type.Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
@ -555,13 +559,13 @@ func geneq(t *types.Type) *obj.LSym {
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
// return
// goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, eq func(pi, qi *Node) *Node) {
checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
@ -576,23 +580,21 @@ func geneq(t *types.Type) *obj.LSym {
}
if nelem <= unroll {
if last {
// Do last comparison in a different manner.
nelem--
}
// Generate a series of checks.
var cond *Node
for i := int64(0); i < nelem; i++ {
c := nodintconst(i)
check := checkIdx(c)
if cond == nil {
cond = check
continue
}
cond = nod(OANDAND, cond, check)
}
nif := nod(OIF, cond, nil)
nif.Rlist.Append(nod(ORETURN, nil, nil))
// if check {} else { goto neq }
nif := nod(OIF, checkIdx(nodintconst(i)), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(nif)
return
}
if last {
fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
i := temp(types.Types[TINT])
@ -601,12 +603,15 @@ func geneq(t *types.Type) *obj.LSym {
post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
loop := nod(OFOR, cond, post)
loop.Ninit.Append(init)
// if eq(pi, qi) {} else { return }
check := checkIdx(i)
nif := nod(OIF, check, nil)
nif.Rlist.Append(nod(ORETURN, nil, nil))
// if eq(pi, qi) {} else { goto neq }
nif := nod(OIF, checkIdx(i), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
loop.Nbody.Append(nif)
fn.Nbody.Append(loop)
if last {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
@ -614,32 +619,28 @@ func geneq(t *types.Type) *obj.LSym {
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, func(pi, qi *Node) *Node {
checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
checkAll(1, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
checkAll(2, func(pi, qi *Node) *Node {
checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
// return true
ret := nod(ORETURN, nil, nil)
ret.List.Append(nodbool(true))
fn.Nbody.Append(ret)
case TSTRUCT:
// Build a list of conditions to satisfy.
@ -717,22 +718,42 @@ func geneq(t *types.Type) *obj.LSym {
flatConds = append(flatConds, c...)
}
var cond *Node
if len(flatConds) == 0 {
cond = nodbool(true)
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
cond = flatConds[0]
for _, c := range flatConds[1:] {
cond = nod(OANDAND, cond, c)
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := nod(OIF, c, nil)
n.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(n)
}
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
}
ret := nod(ORETURN, nil, nil)
ret.List.Append(cond)
fn.Nbody.Append(ret)
}
// ret:
// return
ret := autolabel(".ret")
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
fn.Nbody.Append(nod(ORETURN, nil, nil))
if Debug['r'] != 0 {
// neq:
// r = false
// return (or goto ret)
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
fn.Nbody.Append(nod(ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
}
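For a concrete picture of the control flow the rewritten geneq emits, here is a hand-written sketch (not actual compiler output) of the shape of a generated .eq function for a small array: every comparison but the last branches to neq on failure, the last one assigns the result directly, and the neq block either shares the return epilogue via ret or returns on its own, as decided just above.

func eqFloat2(p, q *[2]float64) (r bool) {
	// if p[0] == q[0] {} else { goto neq }
	if p[0] == q[0] {
	} else {
		goto neq
	}
	// The last comparison assigns the result directly.
	r = p[1] == q[1]
ret:
	return
neq:
	r = false
	goto ret
}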
@ -762,6 +783,39 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
func hasCall(n *Node) bool {
if n.Op == OCALL || n.Op == OCALLFUNC {
return true
}
if n.Left != nil && hasCall(n.Left) {
return true
}
if n.Right != nil && hasCall(n.Right) {
return true
}
for _, x := range n.Ninit.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Nbody.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.List.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Rlist.Slice() {
if hasCall(x) {
return true
}
}
return false
}
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {

View file

@ -86,7 +86,7 @@ func expandiface(t *types.Type) {
sort.Sort(methcmp(methods))
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
yyerror("interface too large")
yyerrorl(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
@ -150,7 +150,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
yyerror("type %L too large", errtype)
yyerrorl(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@ -199,7 +199,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
}
*path = append(*path, t)
if findTypeLoop(asNode(t.Nod).Name.Param.Ntype.Type, path) {
if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
return true
}
*path = (*path)[:len(*path)-1]
@ -381,7 +381,7 @@ func dowidth(t *types.Type) {
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
yyerror("channel element type too large (>64kB)")
yyerrorl(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
@ -414,7 +414,7 @@ func dowidth(t *types.Type) {
if t.Elem().Width != 0 {
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
yyerror("type %L larger than address space", t)
yyerrorl(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
@ -456,7 +456,7 @@ func dowidth(t *types.Type) {
}
if Widthptr == 4 && w != int64(int32(w)) {
yyerror("type %v too large", t)
yyerrorl(typePos(t), "type %v too large", t)
}
t.Width = w

View file

@ -44,6 +44,7 @@ var runtimeDecls = [...]struct {
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
{"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
@ -51,134 +52,134 @@ var runtimeDecls = [...]struct {
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
{"concatstring2", funcTag, 33},
{"concatstring3", funcTag, 34},
{"concatstring4", funcTag, 35},
{"concatstring5", funcTag, 36},
{"concatstrings", funcTag, 38},
{"cmpstring", funcTag, 39},
{"intstring", funcTag, 42},
{"slicebytetostring", funcTag, 43},
{"slicebytetostringtmp", funcTag, 44},
{"slicerunetostring", funcTag, 47},
{"stringtoslicebyte", funcTag, 49},
{"stringtoslicerune", funcTag, 52},
{"slicecopy", funcTag, 53},
{"decoderune", funcTag, 54},
{"countrunes", funcTag, 55},
{"convI2I", funcTag, 56},
{"convT16", funcTag, 57},
{"convT32", funcTag, 57},
{"convT64", funcTag, 57},
{"convTstring", funcTag, 57},
{"convTslice", funcTag, 57},
{"convT2E", funcTag, 58},
{"convT2Enoptr", funcTag, 58},
{"convT2I", funcTag, 58},
{"convT2Inoptr", funcTag, 58},
{"assertE2I", funcTag, 56},
{"assertE2I2", funcTag, 59},
{"assertI2I", funcTag, 56},
{"assertI2I2", funcTag, 59},
{"panicdottypeE", funcTag, 60},
{"panicdottypeI", funcTag, 60},
{"panicnildottype", funcTag, 61},
{"ifaceeq", funcTag, 63},
{"efaceeq", funcTag, 63},
{"fastrand", funcTag, 65},
{"makemap64", funcTag, 67},
{"makemap", funcTag, 68},
{"makemap_small", funcTag, 69},
{"mapaccess1", funcTag, 70},
{"mapaccess1_fast32", funcTag, 71},
{"mapaccess1_fast64", funcTag, 71},
{"mapaccess1_faststr", funcTag, 71},
{"mapaccess1_fat", funcTag, 72},
{"mapaccess2", funcTag, 73},
{"mapaccess2_fast32", funcTag, 74},
{"mapaccess2_fast64", funcTag, 74},
{"mapaccess2_faststr", funcTag, 74},
{"mapaccess2_fat", funcTag, 75},
{"mapassign", funcTag, 70},
{"mapassign_fast32", funcTag, 71},
{"mapassign_fast32ptr", funcTag, 71},
{"mapassign_fast64", funcTag, 71},
{"mapassign_fast64ptr", funcTag, 71},
{"mapassign_faststr", funcTag, 71},
{"mapiterinit", funcTag, 76},
{"mapdelete", funcTag, 76},
{"mapdelete_fast32", funcTag, 77},
{"mapdelete_fast64", funcTag, 77},
{"mapdelete_faststr", funcTag, 77},
{"mapiternext", funcTag, 78},
{"mapclear", funcTag, 79},
{"makechan64", funcTag, 81},
{"makechan", funcTag, 82},
{"chanrecv1", funcTag, 84},
{"chanrecv2", funcTag, 85},
{"chansend1", funcTag, 87},
{"concatstring2", funcTag, 34},
{"concatstring3", funcTag, 35},
{"concatstring4", funcTag, 36},
{"concatstring5", funcTag, 37},
{"concatstrings", funcTag, 39},
{"cmpstring", funcTag, 40},
{"intstring", funcTag, 43},
{"slicebytetostring", funcTag, 44},
{"slicebytetostringtmp", funcTag, 45},
{"slicerunetostring", funcTag, 48},
{"stringtoslicebyte", funcTag, 50},
{"stringtoslicerune", funcTag, 53},
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 89},
{"typedmemmove", funcTag, 90},
{"typedmemclr", funcTag, 91},
{"typedslicecopy", funcTag, 92},
{"selectnbsend", funcTag, 93},
{"selectnbrecv", funcTag, 94},
{"selectnbrecv2", funcTag, 96},
{"selectsetpc", funcTag, 97},
{"selectgo", funcTag, 98},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 98},
{"selectgo", funcTag, 99},
{"block", funcTag, 9},
{"makeslice", funcTag, 99},
{"makeslice64", funcTag, 100},
{"makeslicecopy", funcTag, 101},
{"growslice", funcTag, 103},
{"memmove", funcTag, 104},
{"memclrNoHeapPointers", funcTag, 105},
{"memclrHasPointers", funcTag, 105},
{"memequal", funcTag, 106},
{"memequal0", funcTag, 107},
{"memequal8", funcTag, 107},
{"memequal16", funcTag, 107},
{"memequal32", funcTag, 107},
{"memequal64", funcTag, 107},
{"memequal128", funcTag, 107},
{"f32equal", funcTag, 108},
{"f64equal", funcTag, 108},
{"c64equal", funcTag, 108},
{"c128equal", funcTag, 108},
{"strequal", funcTag, 108},
{"interequal", funcTag, 108},
{"nilinterequal", funcTag, 108},
{"memhash", funcTag, 109},
{"memhash0", funcTag, 110},
{"memhash8", funcTag, 110},
{"memhash16", funcTag, 110},
{"memhash32", funcTag, 110},
{"memhash64", funcTag, 110},
{"memhash128", funcTag, 110},
{"f32hash", funcTag, 110},
{"f64hash", funcTag, 110},
{"c64hash", funcTag, 110},
{"c128hash", funcTag, 110},
{"strhash", funcTag, 110},
{"interhash", funcTag, 110},
{"nilinterhash", funcTag, 110},
{"int64div", funcTag, 111},
{"uint64div", funcTag, 112},
{"int64mod", funcTag, 111},
{"uint64mod", funcTag, 112},
{"float64toint64", funcTag, 113},
{"float64touint64", funcTag, 114},
{"float64touint32", funcTag, 115},
{"int64tofloat64", funcTag, 116},
{"uint64tofloat64", funcTag, 117},
{"uint32tofloat64", funcTag, 118},
{"complex128div", funcTag, 119},
{"racefuncenter", funcTag, 120},
{"makeslice", funcTag, 100},
{"makeslice64", funcTag, 101},
{"makeslicecopy", funcTag, 102},
{"growslice", funcTag, 104},
{"memmove", funcTag, 105},
{"memclrNoHeapPointers", funcTag, 106},
{"memclrHasPointers", funcTag, 106},
{"memequal", funcTag, 107},
{"memequal0", funcTag, 108},
{"memequal8", funcTag, 108},
{"memequal16", funcTag, 108},
{"memequal32", funcTag, 108},
{"memequal64", funcTag, 108},
{"memequal128", funcTag, 108},
{"f32equal", funcTag, 109},
{"f64equal", funcTag, 109},
{"c64equal", funcTag, 109},
{"c128equal", funcTag, 109},
{"strequal", funcTag, 109},
{"interequal", funcTag, 109},
{"nilinterequal", funcTag, 109},
{"memhash", funcTag, 110},
{"memhash0", funcTag, 111},
{"memhash8", funcTag, 111},
{"memhash16", funcTag, 111},
{"memhash32", funcTag, 111},
{"memhash64", funcTag, 111},
{"memhash128", funcTag, 111},
{"f32hash", funcTag, 111},
{"f64hash", funcTag, 111},
{"c64hash", funcTag, 111},
{"c128hash", funcTag, 111},
{"strhash", funcTag, 111},
{"interhash", funcTag, 111},
{"nilinterhash", funcTag, 111},
{"int64div", funcTag, 112},
{"uint64div", funcTag, 113},
{"int64mod", funcTag, 112},
{"uint64mod", funcTag, 113},
{"float64toint64", funcTag, 114},
{"float64touint64", funcTag, 115},
{"float64touint32", funcTag, 116},
{"int64tofloat64", funcTag, 117},
{"uint64tofloat64", funcTag, 118},
{"uint32tofloat64", funcTag, 119},
{"complex128div", funcTag, 120},
{"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 120},
{"racewrite", funcTag, 120},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
@ -233,96 +234,96 @@ func runtimeTypes() []*types.Type {
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
typs[31] = types.NewArray(typs[0], 32)
typs[32] = types.NewPtr(typs[31])
typs[33] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[34] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = types.NewSlice(typs[28])
typs[38] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[37])}, []*Node{anonfield(typs[28])})
typs[39] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[40] = types.NewArray(typs[0], 4)
typs[41] = types.NewPtr(typs[40])
typs[42] = functype(nil, []*Node{anonfield(typs[41]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[43] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = types.Runetype
typs[46] = types.NewSlice(typs[45])
typs[47] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[46])}, []*Node{anonfield(typs[28])})
typs[48] = types.NewSlice(typs[0])
typs[49] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28])}, []*Node{anonfield(typs[48])})
typs[50] = types.NewArray(typs[45], 32)
typs[51] = types.NewPtr(typs[50])
typs[52] = functype(nil, []*Node{anonfield(typs[51]), anonfield(typs[28])}, []*Node{anonfield(typs[46])})
typs[53] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[54] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[57] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[58] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[61] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[62] = types.NewPtr(typs[5])
typs[63] = functype(nil, []*Node{anonfield(typs[62]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[64] = types.Types[TUINT32]
typs[65] = functype(nil, nil, []*Node{anonfield(typs[64])})
typs[66] = types.NewMap(typs[2], typs[2])
typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[69] = functype(nil, nil, []*Node{anonfield(typs[66])})
typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, nil)
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66])}, nil)
typs[80] = types.NewChan(typs[2], types.Cboth)
typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[80])})
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[80])})
typs[83] = types.NewChan(typs[2], types.Crecv)
typs[84] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, nil)
typs[85] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[86] = types.NewChan(typs[2], types.Csend)
typs[87] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, nil)
typs[88] = types.NewArray(typs[0], 3)
typs[89] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[88]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[93] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[95] = types.NewPtr(typs[6])
typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[95]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[97] = functype(nil, []*Node{anonfield(typs[62])}, nil)
typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[102] = types.NewSlice(typs[2])
typs[103] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[102]), anonfield(typs[15])}, []*Node{anonfield(typs[102])})
typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[105] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[112] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[113] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[64])})
typs[116] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[117] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[120] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
@ -331,7 +332,7 @@ func runtimeTypes() []*types.Type {
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[64]), anonfield(typs[64])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}


@ -54,6 +54,7 @@ func printuint(uint64)
func printcomplex(complex128)
func printstring(string)
func printpointer(any)
func printuintptr(uintptr)
func printiface(any)
func printeface(any)
func printslice(any)


@ -198,7 +198,7 @@ func capturevars(xfunc *Node) {
outer = nod(OADDR, outer, nil)
}
if Debug['m'] > 1 {
if Debug.m > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
@ -434,6 +434,8 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) {
fn.Type = xfunc.Type
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
sym := methodSymSuffix(rcvrtype, meth, "-fm")
@ -500,6 +502,10 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = xfunc
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
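
For context, a minimal hypothetical example of the kind of partial call (method value) whose generated "-fm" wrapper body is typechecked by the change above; the names are illustrative, not from this commit:

package main

import "fmt"

type counter struct{ n int }

func (c *counter) Inc() { c.n++ }

func main() {
    c := &counter{}
    // A method value ("partial call"): the compiler emits a wrapper function
    // named along the lines of "(*counter).Inc-fm" for this binding.
    inc := c.Inc
    inc()
    inc()
    fmt.Println(c.n) // 2
}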


@ -283,7 +283,7 @@ func oldname(s *types.Sym) *Node {
c.Name.Defn = n
// Link into list of active closure variables.
// Popped from list in func closurebody.
// Popped from list in func funcLit.
c.Name.Param.Outer = n.Name.Param.Innermost
n.Name.Param.Innermost = c


@ -0,0 +1,273 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"encoding/json"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
)
var embedlist []*Node
var embedCfg struct {
Patterns map[string][]string
Files map[string]string
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &embedCfg); err != nil {
log.Fatalf("%s: %v", file, err)
}
if embedCfg.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if embedCfg.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
const (
embedUnknown = iota
embedBytes
embedString
embedFiles
)
var numLocalEmbed int
func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
if embedCfg.Patterns == nil {
p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.yyerrorpos(pos, "go:embed cannot apply to var without type")
return exprs
}
kind := embedKindApprox(typ)
if kind == embedUnknown {
p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
return exprs
}
// Build list of files to store.
have := make(map[string]bool)
var list []string
for _, e := range embeds {
for _, pattern := range e.Patterns {
files, ok := embedCfg.Patterns[pattern]
if !ok {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if embedCfg.Files[file] == "" {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
have[file] = true
list = append(list, file)
}
if kind == embedFiles {
for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
have[dir] = true
list = append(list, dir+"/")
}
}
}
}
}
sort.Slice(list, func(i, j int) bool {
return embedFileLess(list[i], list[j])
})
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
return exprs
}
}
v := names[0]
if dclcontext != PEXTERN {
numLocalEmbed++
v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
v.Sym.Def = asTypesNode(v)
v.Name.Param.Ntype = typ
v.SetClass(PEXTERN)
externdcl = append(externdcl, v)
exprs = []*Node{v}
}
v.Name.Param.SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
}
// embedKindApprox determines the kind of embedding variable, approximately.
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
func embedKindApprox(typ *Node) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
return embedString
}
if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
return embedBytes
}
return embedUnknown
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
if typ == types.Types[TSTRING] {
return embedString
}
if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
return embedBytes
}
return embedUnknown
}
func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
return name[:i], name[i+1:], isDir
}
// embedFileLess implements the sort order for a list of embedded files.
// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
func embedFileLess(x, y string) bool {
xdir, xelem, _ := embedFileNameSplit(x)
ydir, yelem, _ := embedFileNameSplit(y)
return xdir < ydir || xdir == ydir && xelem < yelem
}
func dumpembeds() {
for _, v := range embedlist {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
files := v.Name.Param.EmbedFiles()
switch kind := embedKind(v.Type); kind {
case embedUnknown:
yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
sym := v.Sym.Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
duintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
off = duintptr(slicedata, off, uint64(len(files)))
off = duintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
// data string
// hash [16]byte
// Emit one of these per file in the set.
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym.Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
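
As a rough illustration of the ordering defined by embedFileNameSplit and embedFileLess above, here is a small self-contained sketch; the helper bodies are copied from the code above and the sample paths are hypothetical:

package main

import (
    "fmt"
    "sort"
)

// embedFileNameSplit splits an embedded file name into directory and element,
// reporting whether the name denotes a directory (trailing '/').
func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
    if name[len(name)-1] == '/' {
        isDir = true
        name = name[:len(name)-1]
    }
    i := len(name) - 1
    for i >= 0 && name[i] != '/' {
        i--
    }
    if i < 0 {
        return ".", name, isDir
    }
    return name[:i], name[i+1:], isDir
}

// embedFileLess sorts by directory first, then by element within a directory.
func embedFileLess(x, y string) bool {
    xdir, xelem, _ := embedFileNameSplit(x)
    ydir, yelem, _ := embedFileNameSplit(y)
    return xdir < ydir || xdir == ydir && xelem < yelem
}

func main() {
    files := []string{"testdata/b.txt", "a.txt", "testdata/", "testdata/a.txt"}
    sort.Slice(files, func(i, j int) bool { return embedFileLess(files[i], files[j]) })
    fmt.Println(files) // [a.txt testdata/ testdata/a.txt testdata/b.txt]
}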


@ -282,7 +282,7 @@ func addrescapes(n *Node) {
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
if Debug['r'] != 0 {
if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
@ -359,7 +359,7 @@ func moveToHeap(n *Node) {
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
if Debug['m'] != 0 {
if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
@ -389,7 +389,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
if Debug['m'] != 0 {
if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
@ -404,11 +404,11 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
if Debug['m'] != 0 && f.Sym != nil {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
if Debug['m'] != 0 && f.Sym != nil {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
@ -419,14 +419,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if Debug['m'] != 0 {
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if Debug['m'] != 0 {
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
@ -448,7 +448,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
esc := loc.paramEsc
esc.Optimize()
if Debug['m'] != 0 && !loc.escapes {
if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}


@ -170,7 +170,7 @@ func (e *Escape) initFunc(fn *Node) {
Fatalf("unexpected node: %v", fn)
}
fn.Esc = EscFuncPlanned
if Debug['m'] > 3 {
if Debug.m > 3 {
Dump("escAnalyze", fn)
}
@ -247,7 +247,7 @@ func (e *Escape) stmt(n *Node) {
lineno = lno
}()
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
}
@ -275,11 +275,11 @@ func (e *Escape) stmt(n *Node) {
case OLABEL:
switch asNode(n.Sym.Label) {
case &nonlooping:
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
case &looping:
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
e.loopDepth++
@ -717,7 +717,7 @@ func (e *Escape) addrs(l Nodes) []EscHole {
func (e *Escape) assign(dst, src *Node, why string, where *Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && Debug['m'] != 0 {
if ignore && Debug.m != 0 {
Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
@ -931,7 +931,7 @@ func (k EscHole) note(where *Node, why string) EscHole {
if where == nil || why == "" {
Fatalf("note: missing where/why")
}
if Debug['m'] >= 2 || logopt.Enabled() {
if Debug.m >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
@ -1077,9 +1077,9 @@ func (e *Escape) flow(k EscHole, src *EscLocation) {
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
if Debug['m'] >= 2 || logopt.Enabled() {
if Debug.m >= 2 || logopt.Enabled() {
pos := linestr(src.n.Pos)
if Debug['m'] >= 2 {
if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
@ -1179,8 +1179,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// that value flow for tagging the function
// later.
if l.isName(PPARAM) {
if (logopt.Enabled() || Debug['m'] >= 2) && !l.escapes {
if Debug['m'] >= 2 {
if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
if Debug.m >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
}
explanation := e.explainPath(root, l)
@ -1196,8 +1196,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
if logopt.Enabled() || Debug['m'] >= 2 {
if Debug['m'] >= 2 {
if logopt.Enabled() || Debug.m >= 2 {
if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
}
explanation := e.explainPath(root, l)
@ -1235,7 +1235,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
for {
// Prevent infinite loop.
if visited[src] {
if Debug['m'] >= 2 {
if Debug.m >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
@ -1263,7 +1263,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
print := Debug['m'] >= 2
print := Debug.m >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
@ -1417,7 +1417,7 @@ func (e *Escape) finish(fns []*Node) {
if loc.escapes {
if n.Op != ONAME {
if Debug['m'] != 0 {
if Debug.m != 0 {
Warnl(n.Pos, "%S escapes to heap", n)
}
if logopt.Enabled() {
@ -1427,7 +1427,7 @@ func (e *Escape) finish(fns []*Node) {
n.Esc = EscHeap
addrescapes(n)
} else {
if Debug['m'] != 0 && n.Op != ONAME {
if Debug.m != 0 && n.Op != ONAME {
Warnl(n.Pos, "%S does not escape", n)
}
n.Esc = EscNone


@ -31,7 +31,7 @@ func exportsym(n *Node) {
}
n.Sym.SetOnExportList(true)
if Debug['E'] != 0 {
if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
@ -150,7 +150,7 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val
n.SetVal(val)
if Debug['E'] != 0 {
if Debug.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
@ -166,7 +166,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n.Func = new(Func)
t.SetNname(asTypesNode(n))
if Debug['E'] != 0 {
if Debug.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
@ -179,7 +179,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
if Debug['E'] != 0 {
if Debug.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
@ -192,7 +192,7 @@ func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
if Debug['E'] != 0 {
if Debug.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}


@ -419,10 +419,17 @@ func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
c := flag & FmtShort
// Useful to see which nodes in an AST printout are actually identical
fmt.Fprintf(s, " p(%p)", n)
if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
}
if c == 0 && n.Name != nil && n.Name.Defn != nil {
// Useful to see where Defn is set and what node it points to
fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
}
if n.Pos.IsKnown() {
pfx := ""
switch n.Pos.IsStmt() {
@ -492,6 +499,15 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
if n.Name.Assigned() {
fmt.Fprint(s, " assigned")
}
if n.Name.IsClosureVar() {
fmt.Fprint(s, " closurevar")
}
if n.Name.Captured() {
fmt.Fprint(s, " captured")
}
if n.Name.IsOutputParamHeapAddr() {
fmt.Fprint(s, " outputparamheapaddr")
}
}
if n.Bounded() {
fmt.Fprint(s, " bounded")
@ -792,6 +808,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
if mode == FDbg {
b.WriteString(t.Etype.String())
b.WriteByte('-')
tconv2(b, t, flag, FErr, visited)
return
}
// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
// try to print it recursively.
// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
@ -805,12 +828,6 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
visited[t] = b.Len()
defer delete(visited, t)
if mode == FDbg {
b.WriteString(t.Etype.String())
b.WriteByte('-')
tconv2(b, t, flag, FErr, visited)
return
}
switch t.Etype {
case TPTR:
b.WriteByte('*')
@ -1709,6 +1726,9 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
}
}
if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
}
if n.Sym != nil && n.Op != ONAME {
mode.Fprintf(s, " %v", n.Sym)
}
@ -1724,6 +1744,16 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
if n.Right != nil {
mode.Fprintf(s, "%v", n.Right)
}
if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
indent(s)
// The function associated with a closure
mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
}
if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
indent(s)
// The dcls for a func or closure
mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
}
if n.List.Len() != 0 {
indent(s)
mode.Fprintf(s, "%v-list%v", n.Op, n.List)


@ -61,12 +61,12 @@ type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
PEXTERN // global variable
PEXTERN // global variables
PAUTO // local variables
PAUTOHEAP // local variable or parameter moved to heap
PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
PFUNC // global function
PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
@ -116,7 +116,15 @@ var decldepth int32
var nolocalimports bool
var Debug [256]int
// gc debug flags
type DebugFlags struct {
P, B, C, E,
K, L, N, S,
W, e, h, j,
l, m, r, w int
}
var Debug DebugFlags
var debugstr string
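
A minimal self-contained sketch of the flag-struct idea, using the standard flag package instead of cmd/internal/objabi; the field set and descriptions are abbreviated and assumed for illustration:

package main

import (
    "flag"
    "fmt"
)

// debugFlags mirrors DebugFlags above: each single-letter debug option is a
// named field instead of an entry in a [256]int array indexed by character.
type debugFlags struct {
    B, E, K, m int
}

func main() {
    var dbg debugFlags
    // The compiler binds these with objabi.Flagcount (a counting flag);
    // plain IntVar is used here only to keep the sketch self-contained.
    flag.IntVar(&dbg.B, "B", 0, "disable bounds checking")
    flag.IntVar(&dbg.E, "E", 0, "debug symbol export")
    flag.IntVar(&dbg.K, "K", 0, "debug missing line numbers")
    flag.IntVar(&dbg.m, "m", 0, "diagnostics verbosity level")
    flag.Parse()

    // Call sites now read Debug.m instead of Debug['m'].
    if dbg.m > 1 {
        fmt.Println("verbose diagnostics enabled")
    }
}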


@ -70,12 +70,8 @@ func newProgs(fn *Node, worker int) *Progs {
pp.pos = fn.Pos
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, -1, false}
if go115ReduceLiveness {
pp.prevLive = LivenessIndex{-1, false}
pp.nextLive = pp.prevLive
} else {
pp.nextLive = LivenessInvalid
}
return pp
}
@ -120,21 +116,6 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
}
if !go115ReduceLiveness {
if pp.nextLive.isUnsafePoint {
// Unsafe points are encoded as a special value in the
// register map.
pp.nextLive.regMapIndex = objabi.PCDATA_RegMapUnsafe
}
if pp.nextLive.regMapIndex != pp.prevLive.regMapIndex {
// Emit register map index change.
idx := pp.nextLive.regMapIndex
pp.prevLive.regMapIndex = idx
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_RegMapIndex)
Addrconst(&p.To, int64(idx))
}
} else {
if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
// Emit unsafe-point marker.
pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
@ -146,14 +127,13 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
}
}
}
p := pp.next
pp.next = pp.NewProg()
pp.clearp(pp.next)
p.Link = pp.next
if !pp.pos.IsKnown() && Debug['K'] != 0 {
if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
}
@ -322,6 +302,12 @@ func ggloblnod(nam *Node) {
if nam.Name.LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym.Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
s.Pkg = "_"
}
}
func ggloblsym(s *obj.LSym, width int32, flags int16) {


@ -1138,13 +1138,10 @@ func (w *exportWriter) stmt(n *Node) {
w.pos(n.Pos)
w.stmtList(n.Ninit)
w.exprsOrNil(n.Left, nil)
w.stmtList(n.List)
w.caseList(n)
case OCASE:
w.op(OCASE)
w.pos(n.Pos)
w.stmtList(n.List)
w.stmtList(n.Nbody)
// case OCASE:
// handled by caseList
case OFALL:
w.op(OFALL)
@ -1168,6 +1165,24 @@ func (w *exportWriter) stmt(n *Node) {
}
}
func (w *exportWriter) caseList(sw *Node) {
namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
cases := sw.List.Slice()
w.uint64(uint64(len(cases)))
for _, cas := range cases {
if cas.Op != OCASE {
Fatalf("expected OCASE, got %v", cas)
}
w.pos(cas.Pos)
w.stmtList(cas.List)
if namedTypeSwitch {
w.localName(cas.Rlist.First())
}
w.stmtList(cas.Nbody)
}
}
func (w *exportWriter) exprList(list Nodes) {
for _, n := range list.Slice() {
w.expr(n)
@ -1232,6 +1247,19 @@ func (w *exportWriter) expr(n *Node) {
w.op(OTYPE)
w.typ(n.Type)
case OTYPESW:
w.op(OTYPESW)
w.pos(n.Pos)
var s *types.Sym
if n.Left != nil {
if n.Left.Op != ONONAME {
Fatalf("expected ONONAME, got %v", n.Left)
}
s = n.Left.Sym
}
w.localIdent(s, 0) // declared pseudo-variable, if any
w.exprsOrNil(n.Right, nil)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
@ -1266,8 +1294,13 @@ func (w *exportWriter) expr(n *Node) {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
// case OCALLPART:
// unimplemented - handled by default case
case OCALLPART:
// An OCALLPART is an OXDOT before type checking.
w.op(OXDOT)
w.pos(n.Pos)
w.expr(n.Left)
// Right node should be ONAME
w.selector(n.Right.Sym)
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
w.op(OXDOT)


@ -742,8 +742,8 @@ func (r *importReader) doInline(n *Node) {
importlist = append(importlist, n)
if Debug['E'] > 0 && Debug['m'] > 2 {
if Debug['m'] > 3 {
if Debug.E > 0 && Debug.m > 2 {
if Debug.m > 3 {
fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
} else {
fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
@ -784,6 +784,28 @@ func (r *importReader) stmtList() []*Node {
return list
}
func (r *importReader) caseList(sw *Node) []*Node {
namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
cases := make([]*Node, r.uint64())
for i := range cases {
cas := nodl(r.pos(), OCASE, nil, nil)
cas.List.Set(r.stmtList())
if namedTypeSwitch {
// Note: per-case variables will have distinct, dotted
// names after import. That's okay: swt.go only needs
// Sym for diagnostics anyway.
caseVar := newnamel(cas.Pos, r.ident())
declare(caseVar, dclcontext)
cas.Rlist.Set1(caseVar)
caseVar.Name.Defn = sw.Left
}
cas.Nbody.Set(r.stmtList())
cases[i] = cas
}
return cases
}
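
For reference, a hypothetical example of the construct this caseList round-trips: a type switch with a declared per-case variable (OTYPESW), which the old importer could not yet handle in exported bodies:

package main

import "fmt"

// describe uses a named type switch; each case sees v with that case's type,
// which is what the per-case variables declared above correspond to.
func describe(x interface{}) string {
    switch v := x.(type) {
    case int:
        return fmt.Sprintf("int %d", v)
    case string:
        return "string " + v
    default:
        return fmt.Sprintf("other %T", v)
    }
}

func main() {
    fmt.Println(describe(42), describe("hi"), describe(3.14))
}
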
func (r *importReader) exprList() []*Node {
var list []*Node
for {
@ -831,6 +853,14 @@ func (r *importReader) node() *Node {
case OTYPE:
return typenod(r.typ())
case OTYPESW:
n := nodl(r.pos(), OTYPESW, nil, nil)
if s := r.ident(); s != nil {
n.Left = npos(n.Pos, newnoname(s))
}
n.Right, _ = r.exprsOrNil()
return n
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// unreachable - should have been resolved by typechecking
@ -866,7 +896,7 @@ func (r *importReader) node() *Node {
// unreachable - handled in case OSTRUCTLIT by elemList
// case OCALLPART:
// unimplemented
// unreachable - mapped to case OXDOT below by exporter
// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
// unreachable - mapped to case OXDOT below by exporter
@ -1025,16 +1055,11 @@ func (r *importReader) node() *Node {
n := nodl(r.pos(), op, nil, nil)
n.Ninit.Set(r.stmtList())
n.Left, _ = r.exprsOrNil()
n.List.Set(r.stmtList())
n.List.Set(r.caseList(n))
return n
case OCASE:
n := nodl(r.pos(), OCASE, nil, nil)
n.List.Set(r.exprList())
// TODO(gri) eventually we must declare variables for type switch
// statements (type switch statements are not yet exported)
n.Nbody.Set(r.stmtList())
return n
// case OCASE:
// handled by caseList
case OFALL:
n := nodl(r.pos(), OFALL, nil, nil)


@ -7,7 +7,7 @@
// saves a copy of the body. Then inlcalls walks each function body to
// expand calls to inlinable functions.
//
// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l a way to disable it. Additional levels (beyond -l) may be buggy and
// are not supported.
// 0: disabled
@ -21,7 +21,7 @@
// The -d typcheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
//
// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not; higher values are for debugging and may go away at any point.
package gc
@ -85,7 +85,7 @@ func typecheckinl(fn *Node) {
return // typecheckinl on local function
}
if Debug['m'] > 2 || Debug_export != 0 {
if Debug.m > 2 || Debug_export != 0 {
fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
}
@ -116,10 +116,10 @@ func caninl(fn *Node) {
}
var reason string // reason, if any, that the function was not inlined
if Debug['m'] > 1 || logopt.Enabled() {
if Debug.m > 1 || logopt.Enabled() {
defer func() {
if reason != "" {
if Debug['m'] > 1 {
if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
}
if logopt.Enabled() {
@ -187,7 +187,7 @@ func caninl(fn *Node) {
defer n.Func.SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
if Debug['l'] == 4 {
if Debug.l == 4 {
cc = 1 // this appears to yield better performance than 0.
}
@ -224,9 +224,9 @@ func caninl(fn *Node) {
// this is so export can find the body of a method
fn.Type.FuncType().Nname = asTypesNode(n)
if Debug['m'] > 1 {
if Debug.m > 1 {
fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
} else if Debug['m'] != 0 {
} else if Debug.m != 0 {
fmt.Printf("%v: can inline %v\n", fn.Line(), n)
}
if logopt.Enabled() {
@ -257,21 +257,39 @@ func inlFlood(n *Node) {
typecheckinl(n)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
switch n.Op {
case ONAME:
// Mark any referenced global variables or
// functions for reexport. Skip methods,
// because they're reexported alongside their
// receiver type.
if n.Class() == PEXTERN || n.Class() == PFUNC && !n.isMethodExpression() {
switch n.Class() {
case PFUNC:
if n.isMethodExpression() {
inlFlood(asNode(n.Type.Nname()))
} else {
inlFlood(n)
exportsym(n)
}
case PEXTERN:
exportsym(n)
}
case OCALLFUNC, OCALLMETH:
// Recursively flood any functions called by
// this one.
inlFlood(asNode(n.Left.Type.Nname()))
case ODOTMETH:
fn := asNode(n.Type.Nname())
inlFlood(fn)
case OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
case OCLOSURE:
// If the closure is inlinable, we'll need to
// flood it too. But today we don't support
// inlining functions that contain closures.
//
// When we do, we'll probably want:
// inlFlood(n.Func.Closure.Func.Nname)
Fatalf("unexpected closure in inlinable function")
}
return true
})
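
To make the re-export rule concrete, a small hypothetical library-style package: if Scale is exported with an inline body, the global and helper it references must be flooded and exported too, so importing packages can expand the body:

package scale

// factor and helper are referenced from Scale's inline body; inlFlood marks
// them (the PEXTERN and PFUNC cases above) so importers can inline Scale.
var factor = 3

func helper(x int) int { return x * factor }

func Scale(x int) int { return helper(x) + 1 }
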
@ -375,10 +393,8 @@ func (v *hairyVisitor) visit(n *Node) bool {
return true
case OCLOSURE,
OCALLPART,
ORANGE,
OSELECT,
OTYPESW,
OGO,
ODEFER,
ODCLTYPE, // can't print yet
@ -423,7 +439,7 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget--
// When debugging, don't stop early, to get full cost of inlining this function
if v.budget < 0 && Debug['m'] < 2 && !logopt.Enabled() {
if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
return true
}
@ -454,7 +470,7 @@ func inlcopy(n *Node) *Node {
}
m := n.copy()
if m.Func != nil {
if n.Op != OCALLPART && m.Func != nil {
Fatalf("unexpected Func: %v", m)
}
m.Left = inlcopy(n.Left)
@ -572,13 +588,11 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
switch n.Op {
// inhibit inlining of their argument
case ODEFER, OGO:
switch n.Left.Op {
case OCALLFUNC, OCALLMETH:
n.Left.SetNoInline(true)
}
return n
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
@ -668,7 +682,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
switch n.Op {
case OCALLFUNC:
if Debug['m'] > 3 {
if Debug.m > 3 {
fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
}
if isIntrinsicCall(n) {
@ -679,7 +693,7 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
case OCALLMETH:
if Debug['m'] > 3 {
if Debug.m > 3 {
fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
}
@ -706,7 +720,14 @@ func inlCallee(fn *Node) *Node {
switch {
case fn.Op == ONAME && fn.Class() == PFUNC:
if fn.isMethodExpression() {
return asNode(fn.Sym.Def)
n := asNode(fn.Type.Nname())
// Check that receiver type matches fn.Left.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
return nil
}
return n
}
return fn
case fn.Op == OCLOSURE:
@ -719,6 +740,11 @@ func inlCallee(fn *Node) *Node {
func staticValue(n *Node) *Node {
for {
if n.Op == OCONVNOP {
n = n.Left
continue
}
n1 := staticValue1(n)
if n1 == nil {
return n
@ -809,14 +835,12 @@ func (v *reassignVisitor) visit(n *Node) *Node {
if n.Left == v.name && n != v.name.Name.Defn {
return n
}
return nil
case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE:
for _, p := range n.List.Slice() {
if p == v.name && n != v.name.Name.Defn {
return n
}
}
return nil
}
if a := v.visit(n.Left); a != nil {
return a
@ -909,7 +933,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
if inlMap[fn] {
if Debug['m'] > 1 {
if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
}
return n
@ -923,12 +947,12 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
// We have a function node, and it has an inlineable body.
if Debug['m'] > 1 {
if Debug.m > 1 {
fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
} else if Debug['m'] != 0 {
} else if Debug.m != 0 {
fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
}
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
}
@ -1009,15 +1033,28 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
nreturns := 0
inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
if n != nil && n.Op == ORETURN {
nreturns++
}
return true
})
// We can delay declaring+initializing result parameters if:
// (1) there's only one "return" statement in the inlined
// function, and (2) the result parameters aren't named.
delayretvars := nreturns == 1
// temporaries for return values.
var retvars []*Node
for i, t := range fn.Type.Results().Fields().Slice() {
var m *Node
mpos := t.Pos
if n := asNode(t.Nname); n != nil && !n.isBlank() {
if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
delayretvars = false // found a named result parameter
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@ -1029,12 +1066,11 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// were not part of the original callee.
if !strings.HasPrefix(m.Sym.Name, "~R") {
m.Name.SetInlFormal(true)
m.Pos = mpos
m.Pos = t.Pos
inlfvars = append(inlfvars, m)
}
}
ninit.Append(nod(ODCL, m, nil))
retvars = append(retvars, m)
}
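
For intuition, a hypothetical callee that satisfies both conditions (a single return statement and an unnamed result), so an inlined call can use delayretvars:

package main

import "fmt"

// double has one return statement and an unnamed result, so when inlined the
// result temporary is declared and assigned at the rewritten return rather
// than being declared and zero-initialized ahead of the inlined body.
func double(x int) int {
    return 2 * x
}

func main() {
    fmt.Println(double(21)) // 42
}
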
@ -1095,12 +1131,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
ninit.Append(vas)
}
if !delayretvars {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(nod(ODCL, n, nil))
ras := nod(OAS, n, nil)
ras = typecheck(ras, ctxStmt)
ninit.Append(ras)
}
}
retlabel := autolabel(".i")
@ -1132,6 +1171,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
subst := inlsubst{
retlabel: retlabel,
retvars: retvars,
delayretvars: delayretvars,
inlvars: inlvars,
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
@ -1172,7 +1212,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
}
@ -1183,7 +1223,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
func inlvar(var_ *Node) *Node {
if Debug['m'] > 3 {
if Debug.m > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
@ -1230,6 +1270,10 @@ type inlsubst struct {
// Temporary result variables.
retvars []*Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
inlvars map[*Node]*Node
// bases maps from original PosBase to PosBase with an extra
@ -1262,13 +1306,13 @@ func (subst *inlsubst) node(n *Node) *Node {
switch n.Op {
case ONAME:
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
}
return inlvar
}
if Debug['m'] > 2 {
if Debug.m > 2 {
fmt.Printf("not substituting name %+v\n", n)
}
return n
@ -1298,6 +1342,14 @@ func (subst *inlsubst) node(n *Node) *Node {
as.List.Append(n)
}
as.Rlist.Set(subst.list(n.List))
if subst.delayretvars {
for _, n := range as.List.Slice() {
as.Ninit.Append(nod(ODCL, n, nil))
n.Name.Defn = as
}
}
as = typecheck(as, ctxStmt)
m.Ninit.Append(as)
}
@ -1360,3 +1412,68 @@ func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
}
return s
}
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
func devirtualize(fn *Node) {
Curfn = fn
inspectList(fn.Nbody, func(n *Node) bool {
if n.Op == OCALLINTER {
devirtualizeCall(n)
}
return true
})
}
func devirtualizeCall(call *Node) {
recv := staticValue(call.Left.Left)
if recv.Op != OCONVIFACE {
return
}
typ := recv.Left.Type
if typ.IsInterface() {
return
}
x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
x.Type = typ
x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
x = typecheck(x, ctxExpr|ctxCallee)
switch x.Op {
case ODOTMETH:
if Debug.m != 0 {
Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
}
call.Op = OCALLMETH
call.Left = x
case ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
if Debug.m != 0 {
Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
}
call.Op = OCALLINTER
call.Left = x
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if Debug.m != 0 {
Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
}
return
}
// Duplicated logic from typecheck for function call return
// value types.
//
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
checkwidth(x.Type)
switch ft := x.Type; ft.NumResults() {
case 0:
case 1:
call.Type = ft.Results().Field(0).Type
default:
call.Type = ft.Results()
}
}
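[editor note] A hedged source-level sketch of the pattern this pass targets: the concrete type behind an interface value is statically visible at the call site, so the interface call can become a direct method call (reported under -m as "devirtualizing ..."). Whether any particular call is rewritten depends on staticValue finding the OCONVIFACE, so treat this as illustrative only.

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// r's concrete type (*bytes.Reader) is visible where r.Read is called,
	// making the interface call a candidate for devirtualization.
	var r io.Reader = bytes.NewReader([]byte("hello"))
	var buf [5]byte
	n, err := r.Read(buf[:])
	fmt.Println(n, err, string(buf[:n]))
}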


@ -51,6 +51,7 @@ func TestIntendedInlining(t *testing.T) {
"funcPC",
"getArgInfoFast",
"getm",
"getMCache",
"isDirectIface",
"itabHashFunc",
"noescape",


@ -34,8 +34,6 @@ import (
"strings"
)
var imported_unsafe bool
var (
buildid string
spectre string
@ -132,7 +130,7 @@ func hidePanic() {
// supportsDynlink reports whether or not the code generator for the given
// architecture supports the -shared and -dynlink flags.
func supportsDynlink(arch *sys.Arch) bool {
return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.S390X)
return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
}
// timing data for compiler phases
@ -211,18 +209,27 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
objabi.Flagcount("%", "debug non-static initializers", &Debug['%'])
objabi.Flagcount("B", "disable bounds checking", &Debug['B'])
objabi.Flagcount("C", "disable printing of columns in error messages", &Debug['C']) // TODO(gri) remove eventually
flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
objabi.Flagcount("E", "debug symbol export", &Debug['E'])
objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
objabi.Flagcount("B", "disable bounds checking", &Debug.B)
objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
objabi.Flagcount("E", "debug symbol export", &Debug.E)
objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
objabi.Flagcount("N", "disable optimizations", &Debug.N)
objabi.Flagcount("S", "print assembly listing", &Debug.S)
objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
objabi.Flagcount("h", "halt on error", &Debug.h)
objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
objabi.Flagcount("l", "disable inlining", &Debug.l)
objabi.Flagcount("m", "print optimization decisions", &Debug.m)
objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
objabi.Flagcount("w", "debug type checking", &Debug.w)
objabi.Flagfn1("I", "add `directory` to import search path", addidir)
objabi.Flagcount("K", "debug missing line numbers", &Debug['K'])
objabi.Flagcount("L", "show full file names in error messages", &Debug['L'])
objabi.Flagcount("N", "disable optimizations", &Debug['N'])
objabi.Flagcount("S", "print assembly listing", &Debug['S'])
objabi.AddVersionFlag() // -V
objabi.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
@ -231,17 +238,13 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
objabi.Flagcount("h", "halt on error", &Debug['h'])
objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
objabi.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
objabi.Flagcount("l", "disable inlining", &Debug['l'])
flag.StringVar(&flag_lang, "lang", "", "release to compile for")
flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
objabi.Flagcount("live", "debug liveness analysis", &debuglive)
objabi.Flagcount("m", "print optimization decisions", &Debug['m'])
if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
}
@ -249,7 +252,6 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&outfile, "o", "", "write output to `file`")
flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
objabi.Flagcount("r", "debug generated wrappers", &Debug['r'])
if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_race, "race", false, "enable race detector")
}
@ -259,7 +261,6 @@ func Main(archInit func(*Arch)) {
}
flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
objabi.Flagcount("w", "debug type checking", &Debug['w'])
flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
var flag_shared bool
var flag_dynlink bool
@ -325,9 +326,9 @@ func Main(archInit func(*Arch)) {
Ctxt.Flag_shared = flag_dynlink || flag_shared
Ctxt.Flag_dynlink = flag_dynlink
Ctxt.Flag_optimize = Debug['N'] == 0
Ctxt.Flag_optimize = Debug.N == 0
Ctxt.Debugasm = Debug['S']
Ctxt.Debugasm = Debug.S
Ctxt.Debugvlog = Debug_vlog
if flagDWARF {
Ctxt.DebugInfo = debuginfo
@ -399,7 +400,7 @@ func Main(archInit func(*Arch)) {
instrumenting = true
}
if compiling_runtime && Debug['N'] != 0 {
if compiling_runtime && Debug.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if nBackendWorkers < 1 {
@ -504,11 +505,11 @@ func Main(archInit func(*Arch)) {
}
// enable inlining. for now:
// default: inlining on. (debug['l'] == 1)
// -l: inlining off (debug['l'] == 0)
// -l=2, -l=3: inlining on again, with extra debugging (debug['l'] > 1)
if Debug['l'] <= 1 {
Debug['l'] = 1 - Debug['l']
// default: inlining on. (Debug.l == 1)
// -l: inlining off (Debug.l == 0)
// -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
if Debug.l <= 1 {
Debug.l = 1 - Debug.l
}
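[editor note] The remapping above in isolation, as a small sketch: the stored flag value 0 (default) becomes 1 (inlining on), 1 (-l) becomes 0 (inlining off), and values above 1 are left alone for extra debugging.

package main

import "fmt"

func effective(l int) int {
	if l <= 1 {
		l = 1 - l // invert only the 0/1 cases
	}
	return l
}

func main() {
	for _, l := range []int{0, 1, 2, 3} {
		fmt.Println(l, "->", effective(l))
	}
}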
if jsonLogOpt != "" { // parse version,destination from json logging optimization.
@ -595,7 +596,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
@ -607,7 +608,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
@ -666,7 +667,7 @@ func Main(archInit func(*Arch)) {
// Phase 5: Inlining
timings.Start("fe", "inlining")
if Debug_typecheckinl != 0 {
// Typecheck imported function bodies if debug['l'] > 1,
// Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
if n.Func.Inl != nil {
@ -680,7 +681,7 @@ func Main(archInit func(*Arch)) {
}
}
if Debug['l'] != 0 {
if Debug.l != 0 {
// Find functions that can be inlined and clone them before walk expands them.
visitBottomUp(xtop, func(list []*Node, recursive bool) {
numfns := numNonClosures(list)
@ -691,7 +692,7 @@ func Main(archInit func(*Arch)) {
// across more than one function.
caninl(n)
} else {
if Debug['m'] > 1 {
if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
}
}
@ -700,6 +701,13 @@ func Main(archInit func(*Arch)) {
})
}
for _, n := range xtop {
if n.Op == ODCLFUNC {
devirtualize(n)
}
}
Curfn = nil
// Phase 6: Escape analysis.
// Required for moving heap allocations onto stack,
// which in turn is required by the closure implementation,
@ -1175,7 +1183,6 @@ func importfile(f *Val) *types.Pkg {
}
if path_ == "unsafe" {
imported_unsafe = true
return unsafepkg
}
@ -1408,29 +1415,34 @@ func IsAlias(sym *types.Sym) bool {
return sym.Def != nil && asNode(sym.Def).Sym != sym
}
// By default, assume any debug flags are incompatible with concurrent compilation.
// A few are safe and potentially in common use for normal compiles, though; mark them as such here.
var concurrentFlagOK = [256]bool{
'B': true, // disabled bounds checking
'C': true, // disable printing of columns in error messages
'e': true, // no limit on errors; errors all come from non-concurrent code
'I': true, // add `directory` to import search path
'N': true, // disable optimizations
'l': true, // disable inlining
'w': true, // all printing happens before compilation
'W': true, // all printing happens before compilation
'S': true, // printing disassembly happens at the end (but see concurrentBackendAllowed below)
// By default, assume any debug flags are incompatible with concurrent
// compilation. A few are safe and potentially in common use for
// normal compiles, though; return true for those.
func concurrentFlagOk() bool {
// Report whether any debug flag that would prevent concurrent
// compilation is set, by zeroing out the allowed ones and then
// checking if the resulting struct is zero.
d := Debug
d.B = 0 // disable bounds checking
d.C = 0 // disable printing of columns in error messages
d.e = 0 // no limit on errors; errors all come from non-concurrent code
d.N = 0 // disable optimizations
d.l = 0 // disable inlining
d.w = 0 // all printing happens before compilation
d.W = 0 // all printing happens before compilation
d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
return d == DebugFlags{}
}
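[editor note] A toy version of the check above: copy the flag struct, zero the fields known to be safe, and compare against the zero value. The field names below are illustrative, not the compiler's real DebugFlags definition.

package main

import "fmt"

type debugFlags struct {
	B, C, N, S, W int
	e, l, m, w    int
}

func onlyAllowedSet(d debugFlags) bool {
	d.B, d.C, d.e, d.N, d.l, d.w, d.W, d.S = 0, 0, 0, 0, 0, 0, 0, 0
	return d == debugFlags{} // any remaining non-zero field blocks concurrency
}

func main() {
	fmt.Println(onlyAllowedSet(debugFlags{N: 1}))       // true: -N alone is allowed
	fmt.Println(onlyAllowedSet(debugFlags{N: 1, m: 2})) // false: -m is not in the allowed set
}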
func concurrentBackendAllowed() bool {
for i, x := range &Debug {
if x != 0 && !concurrentFlagOK[i] {
if !concurrentFlagOk() {
return false
}
}
// Debug['S'] by itself is ok, because all printing occurs
// Debug.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
// Adding Debug_vlog, however, causes Debug['S'] to also print
// Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Debug_vlog || debugstr != "" || debuglive > 0 {
return false


@ -11,6 +11,7 @@ import (
"runtime"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"cmd/compile/internal/syntax"
@ -90,7 +91,11 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
} else {
// line directive base
p0 := b0.Pos()
p1 := src.MakePos(p.makeSrcPosBase(p0.Base()), p0.Line(), p0.Col())
p0b := p0.Base()
if p0b == b0 {
panic("infinite recursion in makeSrcPosBase")
}
p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
}
p.basemap[b0] = b1
@ -135,6 +140,8 @@ type noder struct {
pragcgobuf [][]string
err chan syntax.Error
scope ScopeID
importedUnsafe bool
importedEmbed bool
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
@ -236,7 +243,8 @@ type linkname struct {
func (p *noder) node() {
types.Block = 1
imported_unsafe = false
p.importedUnsafe = false
p.importedEmbed = false
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
@ -249,7 +257,7 @@ func (p *noder) node() {
xtop = append(xtop, p.decls(p.file.DeclList)...)
for _, n := range p.linknames {
if !imported_unsafe {
if !p.importedUnsafe {
p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
@ -324,7 +332,6 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
val := p.basicLit(imp.Path)
ipkg := importfile(&val)
if ipkg == nil {
if nerrors == 0 {
Fatalf("phase error in import")
@ -332,6 +339,13 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
if ipkg == unsafepkg {
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
p.importedEmbed = true
}
ipkg.Direct = true
var my *types.Sym
@ -373,6 +387,20 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
if len(pragma.Embeds) > 0 {
if !p.importedEmbed {
// This check can't be done when building the list pragma.Embeds
// because that list is created before the noder starts walking over the file,
// so at that point it hasn't seen the imports.
// We're left to check now, just before applying the //go:embed lines.
for _, e := range pragma.Embeds {
p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
}
} else {
exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
}
pragma.Embeds = nil
}
p.checkUnused(pragma)
}
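[editor note] For context, a minimal example of the rule enforced above: a //go:embed variable is only accepted in a file that imports "embed" (a blank import suffices for string or []byte targets). hello.txt is a placeholder file assumed to sit next to the source file.

package main

import (
	_ "embed" // required: //go:embed is only allowed in files that import "embed"
	"fmt"
)

//go:embed hello.txt
var greeting string

func main() {
	fmt.Print(greeting)
}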
@ -455,17 +483,17 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
param := n.Name.Param
param.Ntype = typ
param.Alias = decl.Alias
param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if !decl.Alias {
param.Pragma = pragma.Flag & TypePragmas
param.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
}
p.checkUnused(pragma)
}
nod := p.nod(decl, ODCLTYPE, n, nil)
if param.Alias && !langSupported(1, 9, localpkg) {
if param.Alias() && !langSupported(1, 9, localpkg) {
yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
}
return nod
@ -1438,11 +1466,6 @@ func (p *noder) mkname(name *syntax.Name) *Node {
return mkname(p.name(name))
}
func (p *noder) newname(name *syntax.Name) *Node {
// TODO(mdempsky): Set line number?
return newname(p.name(name))
}
func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
@ -1498,6 +1521,7 @@ var allowedStdPragmas = map[string]bool{
"go:cgo_import_dynamic": true,
"go:cgo_ldflag": true,
"go:cgo_dynamic_linker": true,
"go:embed": true,
"go:generate": true,
}
@ -1505,6 +1529,7 @@ var allowedStdPragmas = map[string]bool{
type Pragma struct {
Flag PragmaFlag // collected bits
Pos []PragmaPos // position of each individual flag
Embeds []PragmaEmbed
}
type PragmaPos struct {
@ -1512,12 +1537,22 @@ type PragmaPos struct {
Pos syntax.Pos
}
type PragmaEmbed struct {
Pos syntax.Pos
Patterns []string
}
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.yyerrorpos(pos.Pos, "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
p.yyerrorpos(e.Pos, "misplaced go:embed directive")
}
}
}
func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
@ -1526,6 +1561,11 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
}
}
}
// pragma is called concurrently if files are parsed concurrently.
@ -1570,6 +1610,17 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
case text == "go:embed", strings.HasPrefix(text, "go:embed "):
args, err := parseGoEmbed(text[len("go:embed"):])
if err != nil {
p.error(syntax.Error{Pos: pos, Msg: err.Error()})
}
if len(args) == 0 {
p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
break
}
pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
// code relies on it in golang.org/x/sys/unix and others.
@ -1642,3 +1693,64 @@ func mkname(sym *types.Sym) *Node {
}
return n
}
// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// go/build/read.go also processes these strings and contains similar logic.
func parseGoEmbed(args string) ([]string, error) {
var list []string
for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
var path string
Switch:
switch args[0] {
default:
i := len(args)
for j, c := range args {
if unicode.IsSpace(c) {
i = j
break
}
}
path = args[:i]
args = args[i:]
case '`':
i := strings.Index(args[1:], "`")
if i < 0 {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
path = args[1 : 1+i]
args = args[1+i+1:]
case '"':
i := 1
for ; i < len(args); i++ {
if args[i] == '\\' {
i++
continue
}
if args[i] == '"' {
q, err := strconv.Unquote(args[:i+1])
if err != nil {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
}
path = q
args = args[i+1:]
break Switch
}
}
if i >= len(args) {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
}
if args != "" {
r, _ := utf8.DecodeRuneInString(args)
if !unicode.IsSpace(r) {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
}
list = append(list, path)
}
return list, nil
}
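[editor note] A small sketch of the inputs parseGoEmbed is documented to accept, paired with the pattern lists it should produce. The expected outputs are written as literals here rather than by calling the unexported compiler function.

package main

import "fmt"

func main() {
	cases := []struct {
		directive string
		want      []string
	}{
		{"//go:embed a.txt dir/*.html", []string{"a.txt", "dir/*.html"}},
		{`//go:embed "file with space.txt"`, []string{"file with space.txt"}},
		{"//go:embed `raw.txt` b.txt", []string{"raw.txt", "b.txt"}},
	}
	for _, c := range cases {
		fmt.Printf("%-40s -> %q\n", c.directive, c.want)
	}
}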


@ -14,6 +14,8 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
)
@ -125,6 +127,7 @@ func dumpdata() {
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
// like method wrappers and hash and equality routines.
@ -309,7 +312,7 @@ func addGCLocals() {
if fn == nil {
continue
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals, fn.GCRegs} {
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
@ -358,28 +361,31 @@ func dbvec(s *obj.LSym, off int, bv bvec) int {
return off
}
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
)
// stringsym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
// Same pattern is known to fileStringSym below.
h := sha256.New()
io.WriteString(h, s)
symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
const prefix = "go.string."
symdataname := prefix + symname
symdata := Ctxt.Lookup(symdataname)
symdata := Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
// string data
off := dsname(symdata, 0, s, pos, "string")
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
@ -387,26 +393,122 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
return symdata
}
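[editor note] A minimal sketch of the naming scheme above for long strings: the symbol name is derived from the string's length and SHA-256, following the ".gostring.%d.%x" pattern; the double dot comes from joining the "go.string." prefix with that pattern.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	s := strings.Repeat("x", 200) // longer than the 100-byte cutoff above
	h := sha256.New()
	io.WriteString(h, s)
	fmt.Printf("go.string..gostring.%d.%x\n", len(s), h.Sum(nil))
}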
var slicebytes_gen int
// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, 0, err
}
if !info.Mode().IsRegular() {
return nil, 0, fmt.Errorf("not a regular file")
}
size := info.Size()
if size <= 1*1024 {
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, 0, err
}
if int64(len(data)) != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
var sym *obj.LSym
if readonly {
sym = stringsym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym.Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
copy(hash, sum[:])
}
return sym, size, nil
}
if size > 2e9 {
// ggloblsym takes an int32,
// and probably the rest of the toolchain
// can't handle such big symbols either.
// See golang.org/issue/9862.
return nil, 0, fmt.Errorf("file too large")
}
func slicebytes(nam *Node, s string) {
slicebytes_gen++
symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
// File is too big to read and keep in memory.
// Compute hash if needed for read-only content hashing or if the caller wants it.
var sum []byte
if readonly || len(hash) > 0 {
h := sha256.New()
n, err := io.Copy(h, f)
if err != nil {
return nil, 0, err
}
if n != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
sum = h.Sum(nil)
copy(hash, sum)
}
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
}
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym.Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
}
return symdata, size, nil
}
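[editor note] A hedged sketch of the large-file path above: instead of reading the whole file into memory, the contents are streamed through a SHA-256 hash with io.Copy. "big.dat" is a placeholder input file.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("big.dat")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := sha256.New()
	n, err := io.Copy(h, f) // hash without buffering the file in memory
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, sha256 %x\n", n, h.Sum(nil))
}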
var slicedataGen int
func slicedata(pos src.XPos, s string) *Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
lsym := sym.Linksym()
off := dsname(lsym, 0, s, nam.Pos, "slice")
off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
}
slicesym(nam, symnode, int64(len(s)))
slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}
func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.


@ -50,7 +50,7 @@ type Order struct {
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *Node) {
if Debug['W'] > 1 {
if Debug.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
dumplist(s, fn.Nbody)
}
@ -323,12 +323,7 @@ func (o *Order) stmtList(l Nodes) {
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []*Node) {
const go115makeslicecopy = true
if !go115makeslicecopy {
return
}
if Debug['N'] != 0 || instrumenting {
if Debug.N != 0 || instrumenting {
return
}


@ -24,16 +24,6 @@ import (
"strings"
)
// go115ReduceLiveness disables register maps and only produces stack
// maps at call sites.
//
// In Go 1.15, we changed debug call injection to use conservative
// scanning instead of precise pointer maps, so these are no longer
// necessary.
//
// Keep in sync with runtime/preempt.go:go115ReduceLiveness.
const go115ReduceLiveness = true
// OpVarDef is an annotation for the liveness analysis, marking a place
// where a complete initialization (definition) of a variable begins.
// Since the liveness analysis can see initialization of single-word
@ -96,15 +86,15 @@ type BlockEffects struct {
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
uevar varRegVec
varkill varRegVec
uevar bvec
varkill bvec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
livein varRegVec
liveout varRegVec
livein bvec
liveout bvec
}
// A collection of global state used by liveness analysis.
@ -128,16 +118,14 @@ type Liveness struct {
// current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps and regMaps.
livevars []varRegVec
// these to stackMaps.
livevars []bvec
// livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes.
livenessMap LivenessMap
stackMapSet bvecSet
stackMaps []bvec
regMapSet map[liveRegMask]int
regMaps []liveRegMask
cache progeffectscache
}
@ -158,7 +146,7 @@ func (m *LivenessMap) reset() {
delete(m.vals, k)
}
}
m.deferreturn = LivenessInvalid
m.deferreturn = LivenessDontCare
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@ -166,27 +154,17 @@ func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
}
func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
if !go115ReduceLiveness {
// All safe-points are in the map, so if v isn't in
// the map, it's an unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
return idx
}
return LivenessInvalid
}
// If v isn't in the map, then it's a "don't care" and not an
// unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
return idx
}
return LivenessIndex{StackMapDontCare, StackMapDontCare, false}
return LivenessIndex{StackMapDontCare, false}
}
// LivenessIndex stores the liveness map information for a Value.
type LivenessIndex struct {
stackMapIndex int
regMapIndex int // only for !go115ReduceLiveness
// isUnsafePoint indicates that this is an unsafe-point.
//
@ -197,8 +175,10 @@ type LivenessIndex struct {
isUnsafePoint bool
}
// LivenessInvalid indicates an unsafe point with no stack map.
var LivenessInvalid = LivenessIndex{StackMapDontCare, StackMapDontCare, true} // only for !go115ReduceLiveness
// LivenessDontCare indicates that the liveness information doesn't
// matter. Currently it is used in deferreturn liveness when we don't
// actually need it. It should never be emitted to the PCDATA stream.
var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
@ -212,46 +192,12 @@ func (idx LivenessIndex) StackMapValid() bool {
return idx.stackMapIndex != StackMapDontCare
}
func (idx LivenessIndex) RegMapValid() bool {
return idx.regMapIndex != StackMapDontCare
}
type progeffectscache struct {
retuevar []int32
tailuevar []int32
initialized bool
}
// varRegVec contains liveness bitmaps for variables and registers.
type varRegVec struct {
vars bvec
regs liveRegMask
}
func (v *varRegVec) Eq(v2 varRegVec) bool {
return v.vars.Eq(v2.vars) && v.regs == v2.regs
}
func (v *varRegVec) Copy(v2 varRegVec) {
v.vars.Copy(v2.vars)
v.regs = v2.regs
}
func (v *varRegVec) Clear() {
v.vars.Clear()
v.regs = 0
}
func (v *varRegVec) Or(v1, v2 varRegVec) {
v.vars.Or(v1.vars, v2.vars)
v.regs = v1.regs | v2.regs
}
func (v *varRegVec) AndNot(v1, v2 varRegVec) {
v.vars.AndNot(v1.vars, v2.vars)
v.regs = v1.regs &^ v2.regs
}
// livenessShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
@ -400,110 +346,6 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
}
}
// regEffects returns the registers affected by v.
func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if go115ReduceLiveness {
return 0, 0
}
if v.Op == ssa.OpPhi {
// All phi node arguments must come from the same
// register and the result must also go to that
// register, so there's no overall effect.
return 0, 0
}
addLocs := func(mask liveRegMask, v *ssa.Value, ptrOnly bool) liveRegMask {
if int(v.ID) >= len(lv.f.RegAlloc) {
// v has no allocated registers.
return mask
}
loc := lv.f.RegAlloc[v.ID]
if loc == nil {
// v has no allocated registers.
return mask
}
if v.Op == ssa.OpGetG {
// GetG represents the G register, which is a
// pointer, but not a valid GC register. The
// current G is always reachable, so it's okay
// to ignore this register.
return mask
}
// Collect registers and types from v's location.
var regs [2]*ssa.Register
nreg := 0
switch loc := loc.(type) {
case ssa.LocalSlot:
return mask
case *ssa.Register:
if ptrOnly && !v.Type.HasPointers() {
return mask
}
regs[0] = loc
nreg = 1
case ssa.LocPair:
// The value will have TTUPLE type, and the
// children are nil or *ssa.Register.
if v.Type.Etype != types.TTUPLE {
v.Fatalf("location pair %s has non-tuple type %v", loc, v.Type)
}
for i, loc1 := range &loc {
if loc1 == nil {
continue
}
if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue
}
regs[nreg] = loc1.(*ssa.Register)
nreg++
}
default:
v.Fatalf("weird RegAlloc location: %s (%T)", loc, loc)
}
// Add register locations to vars.
for _, reg := range regs[:nreg] {
if reg.GCNum() == -1 {
if ptrOnly {
v.Fatalf("pointer in non-pointer register %v", reg)
} else {
continue
}
}
mask |= 1 << uint(reg.GCNum())
}
return mask
}
// v clobbers all registers it writes to (whether or not the
// write is pointer-typed).
kill = addLocs(0, v, false)
for _, arg := range v.Args {
// v uses all registers it reads from, but we only
// care about marking those containing pointers.
uevar = addLocs(uevar, arg, true)
}
return uevar, kill
}
type liveRegMask uint32 // only if !go115ReduceLiveness
func (m liveRegMask) niceString(config *ssa.Config) string {
if m == 0 {
return "<none>"
}
str := ""
for i, reg := range config.GCRegMap {
if m&(1<<uint(i)) != 0 {
if str != "" {
str += ","
}
str += reg.String()
}
}
return str
}
type livenessFuncCache struct {
be []BlockEffects
livenessMap LivenessMap
@ -519,8 +361,6 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
vars: vars,
idx: idx,
stkptrsize: stkptrsize,
regMapSet: make(map[liveRegMask]int),
}
// Significant sources of allocation are kept in the ssa.Cache
@ -533,7 +373,7 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
lc.livenessMap.vals = nil
}
if lv.be == nil {
@ -546,10 +386,10 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
for _, b := range f.Blocks {
be := lv.blockEffects(b)
be.uevar = varRegVec{vars: bulk.next()}
be.varkill = varRegVec{vars: bulk.next()}
be.livein = varRegVec{vars: bulk.next()}
be.liveout = varRegVec{vars: bulk.next()}
be.uevar = bulk.next()
be.varkill = bulk.next()
be.livein = bulk.next()
be.liveout = bulk.next()
}
lv.livenessMap.reset()
@ -637,20 +477,6 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
}
}
// usedRegs returns the maximum width of the live register map.
func (lv *Liveness) usedRegs() int32 {
var any liveRegMask
for _, live := range lv.regMaps {
any |= live
}
i := int32(0)
for any != 0 {
any >>= 1
i++
}
return i
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
@ -851,10 +677,6 @@ func (lv *Liveness) markUnsafePoints() {
// particular, call Values can have a stack map in case the callee
// grows the stack, but not themselves be a safe-point.
func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
// The runtime only has safe-points in function prologues, so
// we only need stack maps at call sites. go:nosplit functions
// are similar.
if go115ReduceLiveness || compiling_runtime || lv.f.NoSplit {
if !v.Op.IsCall() {
return false
}
@ -865,17 +687,6 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
return false
}
return true
}
switch v.Op {
case ssa.OpInitMem, ssa.OpArg, ssa.OpSP, ssa.OpSB,
ssa.OpSelect0, ssa.OpSelect1, ssa.OpGetG,
ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive,
ssa.OpPhi:
// These don't produce code (see genssa).
return false
}
return !lv.unsafePoints.Get(int32(v.ID))
}
// Initializes the sets for solving the live variables. Visits all the
@ -891,17 +702,13 @@ func (lv *Liveness) prologue() {
// effects with the each prog effects.
for j := len(b.Values) - 1; j >= 0; j-- {
pos, e := lv.valueEffects(b.Values[j])
regUevar, regKill := lv.regEffects(b.Values[j])
if e&varkill != 0 {
be.varkill.vars.Set(pos)
be.uevar.vars.Unset(pos)
be.varkill.Set(pos)
be.uevar.Unset(pos)
}
be.varkill.regs |= regKill
be.uevar.regs &^= regKill
if e&uevar != 0 {
be.uevar.vars.Set(pos)
be.uevar.Set(pos)
}
be.uevar.regs |= regUevar
}
}
}
@ -911,8 +718,8 @@ func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
nvars := int32(len(lv.vars))
newlivein := varRegVec{vars: bvalloc(nvars)}
newliveout := varRegVec{vars: bvalloc(nvars)}
newlivein := bvalloc(nvars)
newliveout := bvalloc(nvars)
// Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
@ -930,11 +737,11 @@ func (lv *Liveness) solve() {
switch b.Kind {
case ssa.BlockRet:
for _, pos := range lv.cache.retuevar {
newliveout.vars.Set(pos)
newliveout.Set(pos)
}
case ssa.BlockRetJmp:
for _, pos := range lv.cache.tailuevar {
newliveout.vars.Set(pos)
newliveout.Set(pos)
}
case ssa.BlockExit:
// panic exit - nothing to do
@ -969,7 +776,7 @@ func (lv *Liveness) solve() {
// variables at each safe point locations.
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := varRegVec{vars: bvalloc(nvars)}
liveout := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
// If there is a defer (that could recover), then all output
@ -1025,12 +832,11 @@ func (lv *Liveness) epilogue() {
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
lv.livevars = append(lv.livevars, live)
}
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
firstBitmapIndex := len(lv.livevars)
// Walk forward through the basic block instructions and
// allocate liveness maps for those instructions that need them.
@ -1040,7 +846,7 @@ func (lv *Liveness) epilogue() {
}
live := bvalloc(nvars)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
lv.livevars = append(lv.livevars, live)
}
// walk backward, construct maps at each safe point
@ -1056,21 +862,18 @@ func (lv *Liveness) epilogue() {
live := &lv.livevars[index]
live.Or(*live, liveout)
live.vars.Or(live.vars, livedefer) // only for non-entry safe points
live.Or(*live, livedefer) // only for non-entry safe points
index--
}
// Update liveness information.
pos, e := lv.valueEffects(v)
regUevar, regKill := lv.regEffects(v)
if e&varkill != 0 {
liveout.vars.Unset(pos)
liveout.Unset(pos)
}
liveout.regs &^= regKill
if e&uevar != 0 {
liveout.vars.Set(pos)
liveout.Set(pos)
}
liveout.regs |= regUevar
}
if b == lv.f.Entry {
@ -1080,7 +883,7 @@ func (lv *Liveness) epilogue() {
// Check to make sure only input variables are live.
for i, n := range lv.vars {
if !liveout.vars.Get(int32(i)) {
if !liveout.Get(int32(i)) {
continue
}
if n.Class() == PPARAM {
@ -1094,32 +897,16 @@ func (lv *Liveness) epilogue() {
live.Or(*live, liveout)
}
// Check that no registers are live across calls.
// For closure calls, the CALLclosure is the last use
// of the context register, so it's dead after the call.
index = int32(firstBitmapIndex)
for _, v := range b.Values {
if lv.hasStackMap(v) {
live := lv.livevars[index]
if v.Op.IsCall() && live.regs != 0 {
lv.printDebug()
v.Fatalf("%v register %s recorded as live at call", lv.fn.Func.Nname, live.regs.niceString(lv.f.Config))
}
index++
}
}
// The liveness maps for this block are now complete. Compact them.
lv.compact(b)
}
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.Func.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessInvalid
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
stackMapIndex: lv.stackMapSet.add(livedefer),
regMapIndex: 0, // entry regMap, containing no live registers
isUnsafePoint: false,
}
}
@ -1136,20 +923,10 @@ func (lv *Liveness) epilogue() {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
}
}
if !go115ReduceLiveness {
// Check that no registers are live at function entry.
// The context register, if any, comes from a
// LoweredGetClosurePtr operation first thing in the function,
// so it doesn't appear live at entry.
if regs := lv.regMaps[0]; regs != 0 {
lv.printDebug()
lv.f.Fatalf("%v register %s recorded as live on entry", lv.fn.Func.Nname, regs.niceString(lv.f.Config))
}
}
}
// Compact coalesces identical bitmaps from lv.livevars into the sets
// lv.stackMapSet and lv.regMaps.
// lv.stackMapSet.
//
// Compact clears lv.livevars.
//
@ -1165,45 +942,23 @@ func (lv *Liveness) epilogue() {
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
func (lv *Liveness) compact(b *ssa.Block) {
add := func(live varRegVec, isUnsafePoint bool) LivenessIndex { // only if !go115ReduceLiveness
// Deduplicate the stack map.
stackIndex := lv.stackMapSet.add(live.vars)
// Deduplicate the register map.
regIndex, ok := lv.regMapSet[live.regs]
if !ok {
regIndex = len(lv.regMapSet)
lv.regMapSet[live.regs] = regIndex
lv.regMaps = append(lv.regMaps, live.regs)
}
return LivenessIndex{stackIndex, regIndex, isUnsafePoint}
}
pos := 0
if b == lv.f.Entry {
// Handle entry stack map.
if !go115ReduceLiveness {
add(lv.livevars[0], false)
} else {
lv.stackMapSet.add(lv.livevars[0].vars)
}
lv.stackMapSet.add(lv.livevars[0])
pos++
}
for _, v := range b.Values {
if go115ReduceLiveness {
hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
idx := LivenessIndex{StackMapDontCare, StackMapDontCare, isUnsafePoint}
idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
if hasStackMap {
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos].vars)
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {
lv.livenessMap.set(v, idx)
}
} else if lv.hasStackMap(v) {
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
lv.livenessMap.set(v, add(lv.livevars[pos], isUnsafePoint))
pos++
}
}
// Reset livevars.
@ -1250,8 +1005,8 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
Warnl(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live varRegVec) bool {
if live.vars.IsEmpty() && live.regs == 0 {
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
if live.IsEmpty() {
return printed
}
@ -1264,19 +1019,18 @@ func (lv *Liveness) printbvec(printed bool, name string, live varRegVec) bool {
comma := ""
for i, n := range lv.vars {
if !live.vars.Get(int32(i)) {
if !live.Get(int32(i)) {
continue
}
fmt.Printf("%s%s", comma, n.Sym.Name)
comma = ","
}
fmt.Printf("%s%s", comma, live.regs.niceString(lv.f.Config))
return true
}
// printeffect is like printbvec, but for valueEffects and regEffects.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool, regMask liveRegMask) bool {
if !x && regMask == 0 {
// printeffect is like printbvec, but for valueEffects.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
if !x {
return printed
}
if !printed {
@ -1288,15 +1042,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool, re
if x {
fmt.Printf("%s", lv.vars[pos].Sym.Name)
}
for j, reg := range lv.f.Config.GCRegMap {
if regMask&(1<<uint(j)) != 0 {
if x {
fmt.Printf(",")
}
x = true
fmt.Printf("%v", reg)
}
}
return true
}
@ -1364,15 +1110,14 @@ func (lv *Liveness) printDebug() {
pcdata := lv.livenessMap.Get(v)
pos, effect := lv.valueEffects(v)
regUevar, regKill := lv.regEffects(v)
printed = false
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0, regUevar)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0, regKill)
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
if printed {
fmt.Printf("\n")
}
if pcdata.StackMapValid() || pcdata.RegMapValid() {
if pcdata.StackMapValid() {
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
@ -1388,16 +1133,6 @@ func (lv *Liveness) printDebug() {
printed = true
}
}
if pcdata.RegMapValid() { // only if !go115ReduceLiveness
regLive := lv.regMaps[pcdata.regMapIndex]
if regLive != 0 {
if printed {
fmt.Printf(",")
}
fmt.Printf("%s", regLive.niceString(lv.f.Config))
printed = true
}
}
fmt.Printf("\n")
}
@ -1423,7 +1158,7 @@ func (lv *Liveness) printDebug() {
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
@ -1452,7 +1187,7 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
maxLocals := lv.stkptrsize
// Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp, regsSymTmp obj.LSym
var argsSymTmp, liveSymTmp obj.LSym
args := bvalloc(int32(maxArgs / int64(Widthptr)))
aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
@ -1472,24 +1207,6 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
loff = dbvec(&liveSymTmp, loff, locals)
}
if !go115ReduceLiveness {
regs := bvalloc(lv.usedRegs())
roff := duint32(&regsSymTmp, 0, uint32(len(lv.regMaps))) // number of bitmaps
roff = duint32(&regsSymTmp, roff, uint32(regs.n)) // number of bits in each bitmap
if regs.n > 32 {
// Our uint32 conversion below won't work.
Fatalf("GP registers overflow uint32")
}
if regs.n > 0 {
for _, live := range lv.regMaps {
regs.Clear()
regs.b[0] = uint32(live)
roff = dbvec(&regsSymTmp, roff, regs)
}
}
}
// Give these LSyms content-addressable names,
// so that they can be de-duplicated.
// This provides significant binary size savings.
@ -1502,11 +1219,7 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
lsym.Set(obj.AttrContentAddressable, true)
})
}
if !go115ReduceLiveness {
return makeSym(&argsSymTmp), makeSym(&liveSymTmp), makeSym(&regsSymTmp)
}
// TODO(go115ReduceLiveness): Remove regsSym result
return makeSym(&argsSymTmp), makeSym(&liveSymTmp), nil
return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
}
// Entry pointer for liveness analysis. Solves for the liveness of
@ -1553,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
// Emit the live pointer map data structures
ls := e.curfn.Func.lsym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals, fninfo.GCRegs = lv.emit()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
@ -1567,14 +1280,6 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
if !go115ReduceLiveness {
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_RegPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCRegs
}
return lv.livenessMap
}

View file

@ -466,7 +466,7 @@ func walkrange(n *Node) *Node {
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
if Debug['N'] != 0 || instrumenting {
if Debug.N != 0 || instrumenting {
return false
}
@ -533,7 +533,7 @@ func mapClear(m *Node) *Node {
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
if Debug['N'] != 0 || instrumenting {
if Debug.N != 0 || instrumenting {
return false
}


@ -1275,9 +1275,8 @@ func dtypesym(t *types.Type) *obj.LSym {
}
ot = dgopkgpath(lsym, ot, tpkg)
xcount := sort.Search(n, func(i int) bool { return !types.IsExported(m[i].name.Name) })
ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
ot = duintptr(lsym, ot, uint64(xcount))
ot = duintptr(lsym, ot, uint64(n))
ot = duintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
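[editor note] A sketch of the xcount computation above: it assumes the method list is sorted with exported names first, so sort.Search returns the index of the first unexported name, which equals the number of exported methods. isExported below is a crude ASCII-only stand-in for types.IsExported.

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"Close", "Read", "Write", "init", "reset"} // exported names sort first
	isExported := func(s string) bool { return s[0] >= 'A' && s[0] <= 'Z' }
	xcount := sort.Search(len(names), func(i int) bool { return !isExported(names[i]) })
	fmt.Println(xcount) // 3
}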


@ -75,8 +75,19 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 {
inspectList(n.Nbody, func(n *Node) bool {
switch n.Op {
case OCALLFUNC, OCALLMETH:
fn := asNode(n.Left.Type.Nname())
case ONAME:
if n.Class() == PFUNC {
if n.isMethodExpression() {
n = asNode(n.Type.Nname())
}
if n != nil && n.Name.Defn != nil {
if m := v.visit(n.Name.Defn); m < min {
min = m
}
}
}
case ODOTMETH:
fn := asNode(n.Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
min = m


@ -39,7 +39,7 @@ func (s *InitSchedule) append(n *Node) {
// staticInit adds an initialization statement n to the schedule.
func (s *InitSchedule) staticInit(n *Node) {
if !s.tryStaticInit(n) {
if Debug['%'] != 0 {
if Debug.P != 0 {
Dump("nonstatic", n)
}
s.append(n)
@ -375,11 +375,6 @@ func readonlystaticname(t *types.Type) *Node {
return n
}
func isLiteral(n *Node) bool {
// Treat nils as zeros rather than literals.
return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
}
func (n *Node) isSimpleName() bool {
return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
}
@ -404,7 +399,7 @@ const (
func getdyn(n *Node, top bool) initGenType {
switch n.Op {
default:
if isLiteral(n) {
if n.isGoConst() {
return initConst
}
return initDynamic
@ -559,7 +554,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
continue
}
islit := isLiteral(value)
islit := value.isGoConst()
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
@ -732,7 +727,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
continue
}
if vstat != nil && isLiteral(value) { // already set by copy from static value
if vstat != nil && value.isGoConst() { // already set by copy from static value
continue
}

View file

@ -59,7 +59,7 @@ func initssaconfig() {
_ = types.NewPtr(types.Types[TINT64]) // *int64
_ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0)
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
ssaConfig.Race = flag_race
ssaCaches = make([]ssa.Cache, nBackendWorkers)
@ -72,9 +72,9 @@ func initssaconfig() {
deferproc = sysfunc("deferproc")
deferprocStack = sysfunc("deferprocStack")
Deferreturn = sysfunc("deferreturn")
Duffcopy = sysvar("duffcopy") // asm func with special ABI
Duffzero = sysvar("duffzero") // asm func with special ABI
gcWriteBarrier = sysvar("gcWriteBarrier") // asm func with special ABI
Duffcopy = sysfunc("duffcopy")
Duffzero = sysfunc("duffzero")
gcWriteBarrier = sysfunc("gcWriteBarrier")
goschedguarded = sysfunc("goschedguarded")
growslice = sysfunc("growslice")
msanread = sysfunc("msanread")
@ -105,51 +105,51 @@ func initssaconfig() {
// asm funcs with special ABI
if thearch.LinkArch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: sysvar("gcWriteBarrier"),
x86.REG_CX: sysvar("gcWriteBarrierCX"),
x86.REG_DX: sysvar("gcWriteBarrierDX"),
x86.REG_BX: sysvar("gcWriteBarrierBX"),
x86.REG_BP: sysvar("gcWriteBarrierBP"),
x86.REG_SI: sysvar("gcWriteBarrierSI"),
x86.REG_R8: sysvar("gcWriteBarrierR8"),
x86.REG_R9: sysvar("gcWriteBarrierR9"),
x86.REG_AX: sysfunc("gcWriteBarrier"),
x86.REG_CX: sysfunc("gcWriteBarrierCX"),
x86.REG_DX: sysfunc("gcWriteBarrierDX"),
x86.REG_BX: sysfunc("gcWriteBarrierBX"),
x86.REG_BP: sysfunc("gcWriteBarrierBP"),
x86.REG_SI: sysfunc("gcWriteBarrierSI"),
x86.REG_R8: sysfunc("gcWriteBarrierR8"),
x86.REG_R9: sysfunc("gcWriteBarrierR9"),
}
}
if thearch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = sysvar("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
} else {
BoundsCheckFunc[ssa.BoundsIndex] = sysvar("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicSlice3CU")
BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
}
if thearch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
@ -357,7 +357,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
switch {
case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
@ -409,11 +409,17 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT:
case PPARAM:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
if n.Class() == PPARAMOUT && s.canSSA(n) {
args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
case PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
@ -741,7 +747,7 @@ func (s *state) pushLine(line src.XPos) {
// the frontend may emit node with line number missing,
// use the parent line number in this case.
line = s.peekPos()
if Debug['K'] != 0 {
if Debug.K != 0 {
Warn("buildssa: unknown position (line 0)")
}
} else {
@ -1214,7 +1220,7 @@ func (s *state) stmt(n *Node) {
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
@ -2472,6 +2478,11 @@ func (s *state) expr(n *Node) *ssa.Value {
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OANDNOT:
a := s.expr(n.Left)
b := s.expr(n.Right)
b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
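[editor note] The OANDNOT case added in the hunk above lowers a &^ b into an AND of a with the bitwise complement of b. A quick check of that identity, for orientation:

package main

import "fmt"

func main() {
	a, b := uint8(0b1011_0110), uint8(0b0110_0011)
	fmt.Printf("%08b\n", a&^b)   // 10010100
	fmt.Printf("%08b\n", a & ^b) // 10010100: same result via AND with the complement
}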
@ -3389,6 +3400,13 @@ func init() {
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
@ -3427,6 +3445,12 @@ func init() {
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@ -3434,14 +3458,64 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
// Whether the target supports the atomic instructions is detected dynamically.
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
v := s.load(types.Types[TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have atomic instructions - use them directly.
s.startBlock(bTrue)
emit(s, n, args, op1, typ)
s.endBlock().AddEdgeTo(bEnd)
// Use original instruction sequence.
s.startBlock(bFalse)
emit(s, n, args, op0, typ)
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
if rtyp == TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
}
}
}
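// Sketch of the control flow the guarded builder above emits (illustrative
// pseudo-Go only; op0 stands for the baseline instruction sequence and op1
// for the single-instruction variant selected when the CPU supports it):
//
//	if arm64HasATOMICS {        // bTrue, marked BranchLikely
//		result = op1(addr, val) // one atomic instruction
//	} else {                    // bFalse
//		result = op0(addr, val) // fallback instruction sequence
//	}
//	// bEnd: both paths merge; the result is read back with s.variable
//	// unless rtyp == TNIL, in which case only the memory effect matters.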
atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@ -3458,46 +3532,11 @@ func init() {
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
makeXaddARM64 := func(op0 ssa.Op, op1 ssa.Op, ty types.EType) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
// Target Atomic feature is identified by dynamic detection
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
v := s.load(types.Types[TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchUnlikely // most machines don't have Atomics nowadays
// We have atomic instructions - use it directly.
s.startBlock(bTrue)
v0 := s.newValue3(op1, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v0)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v0)
s.endBlock().AddEdgeTo(bEnd)
// Use original instruction sequence.
s.startBlock(bFalse)
v1 := s.newValue3(op0, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v1)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v1)
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[ty])
}
}
addF("runtime/internal/atomic", "Xadd",
makeXaddARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32),
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
makeXaddARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64),
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
@ -3506,14 +3545,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
@ -3522,18 +3561,60 @@ func init() {
},
sys.PPC64)
atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
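// The And/Or intrinsics have no result value, so the guarded ARM64 versions
// below are instantiated with rtyp == TNIL and return nil; only the memory
// state is updated.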
addF("runtime/internal/atomic", "And8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
@ -3542,9 +3623,19 @@ func init() {
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
@ -4709,7 +4800,7 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
s.nilCheck(itab)
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
@ -4830,7 +4921,7 @@ func (s *state) addr(n *Node) *ssa.Value {
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
if Debug['N'] != 0 {
if Debug.N != 0 {
return false
}
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
@ -4869,7 +4960,7 @@ func (s *state) canSSA(n *Node) bool {
if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARMOUT?
// TODO: treat as a PPARAMOUT?
return false
}
return canSSAType(n.Type)
@ -4941,7 +5032,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
if bounded || Debug['B'] != 0 {
if bounded || Debug.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
@ -5182,7 +5273,10 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
// no scalar fields.
if t.IsPtr() && t.Elem().NotInHeap() {
s.store(t, left, right) // see issue 42032
}
// otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
@ -5226,6 +5320,9 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
break // see issue 42032
}
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
@ -6213,7 +6310,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.nextLive = LivenessIndex{-1, -1, allUnsafe(f)}
s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
// Emit values in block
thearch.SSAMarkMoves(&s, b)
@ -6291,7 +6388,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {
if i < len(f.Blocks)-1 && Debug.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
@ -6599,7 +6696,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
}
if bounded || Debug['B'] != 0 {
if bounded || Debug.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
@ -6873,56 +6970,38 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this string up into two separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
return p, l
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
u := types.Types[TUINTPTR]
t := types.NewPtr(types.Types[TUINT8])
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
f = ".type"
}
c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
d := e.splitSlot(&name, ".data", u.Size(), t)
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
d := e.SplitSlot(&name, ".data", u.Size(), t)
return c, d
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.Elem())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this slice up into three separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
return p, l, c
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
@ -6930,53 +7009,30 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
} else {
t = types.Types[TFLOAT32]
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this complex up into two separate variables.
r := e.splitSlot(&name, ".real", 0, t)
i := e.splitSlot(&name, ".imag", t.Size(), t)
r := e.SplitSlot(&name, ".real", 0, t)
i := e.SplitSlot(&name, ".imag", t.Size(), t)
return r, i
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this int64 up into two separate variables.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
// Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
}
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
ft := st.FieldType(i)
var offset int64
for f := 0; f < i; f++ {
offset += st.FieldType(f).Size()
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
}
return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
@ -6986,19 +7042,23 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
e.Fatalf(n.Pos, "bad array size")
}
et := at.Elem()
if n.Class() == PAUTO && !n.Name.Addrtaken() {
return e.splitSlot(&name, "[0]", 0, et)
}
return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
return e.SplitSlot(&name, "[0]", 0, et)
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N.(*Node)
if node.Class() != PAUTO || node.Name.Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
n := &Node{
Name: new(Name),


@ -96,7 +96,7 @@ func flusherrors() {
}
func hcrash() {
if Debug['h'] != 0 {
if Debug.h != 0 {
flusherrors()
if outfile != "" {
os.Remove(outfile)
@ -107,7 +107,7 @@ func hcrash() {
}
func linestr(pos src.XPos) string {
return Ctxt.OutermostPos(pos).Format(Debug['C'] == 0, Debug['L'] == 1)
return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
}
// lasterror keeps track of the most recently issued error.
@ -153,7 +153,7 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) {
hcrash()
nerrors++
if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
if nsavederrors+nerrors >= 10 && Debug.e == 0 {
flusherrors()
fmt.Printf("%v: too many errors\n", linestr(pos))
errorexit()
@ -175,7 +175,7 @@ func Warn(fmt_ string, args ...interface{}) {
func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
adderr(line, fmt_, args...)
if Debug['m'] != 0 {
if Debug.m != 0 {
flusherrors()
}
}
@ -222,7 +222,7 @@ func hasUniquePos(n *Node) bool {
}
if !n.Pos.IsKnown() {
if Debug['K'] != 0 {
if Debug.K != 0 {
Warn("setlineno: unknown position (line 0)")
}
return false
@ -348,7 +348,7 @@ func newname(s *types.Sym) *Node {
return n
}
// newname returns a new ONAME Node associated with symbol s at position pos.
// newnamel returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
func newnamel(pos src.XPos, s *types.Sym) *Node {
if s == nil {
@ -1506,7 +1506,7 @@ func structargs(tl *types.Type, mustname bool) []*Node {
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
if false && Debug['r'] != 0 {
if false && Debug.r != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
@ -1579,7 +1579,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
fn.Nbody.Append(call)
}
if false && Debug['r'] != 0 {
if false && Debug.r != 0 {
dumplist("genwrapper body", fn.Nbody)
}
@ -1720,7 +1720,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
// the method does not exist for value types.
rcvr := tm.Type.Recv().Type
if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
if false && Debug['r'] != 0 {
if false && Debug.r != 0 {
yyerror("interface pointer mismatch")
}
@ -1854,8 +1854,10 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
case TPTR,
TCHAN,
case TPTR:
// Pointers to notinheap types must be stored indirectly. See issue 42076.
return !t.Elem().NotInHeap()
case TCHAN,
TMAP,
TFUNC,
TUNSAFEPTR:


@ -142,7 +142,7 @@ const (
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from :=
@ -247,7 +247,7 @@ func (n *Node) Val() Val {
// SetVal sets the Val for the node, which must not have been used with SetOpt.
func (n *Node) SetVal(v Val) {
if n.HasOpt() {
Debug['h'] = 1
Debug.h = 1
Dump("have Opt", n)
Fatalf("have Opt")
}
@ -270,7 +270,7 @@ func (n *Node) SetOpt(x interface{}) {
return
}
if n.HasVal() {
Debug['h'] = 1
Debug.h = 1
Dump("have Val", n)
Fatalf("have Val")
}
@ -460,14 +460,14 @@ type Param struct {
// x1 := xN.Defn
// x1.Innermost = xN.Outer
//
// We leave xN.Innermost set so that we can still get to the original
// We leave x1.Innermost set so that we can still get to the original
// variable quickly. Not shown here, but once we're
// done parsing a function and no longer need xN.Outer for the
// lexical x reference links as described above, closurebody
// lexical x reference links as described above, funcLit
// recomputes xN.Outer as the semantic x reference link tree,
// even filling in x in intermediate closures that might not
// have mentioned it along the way to inner closures that did.
// See closurebody for details.
// See funcLit for details.
//
// During the eventual compilation, then, for closure variables we have:
//
@ -480,11 +480,87 @@ type Param struct {
Innermost *Node
Outer *Node
// OTYPE
//
// TODO: Should Func pragmas also be stored on the Name?
Pragma PragmaFlag
Alias bool // node is alias for Ntype (only used when type-checking ODCLTYPE)
// OTYPE & ONAME //go:embed info,
// sharing storage to reduce gc.Param size.
// Extra is nil, or else *Extra is a *paramType or an *embedFileList.
Extra *interface{}
}
type paramType struct {
flag PragmaFlag
alias bool
}
type embedFileList []string
// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
func (p *Param) Pragma() PragmaFlag {
if p.Extra == nil {
return 0
}
return (*p.Extra).(*paramType).flag
}
// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
func (p *Param) SetPragma(flag PragmaFlag) {
if p.Extra == nil {
if flag == 0 {
return
}
p.Extra = new(interface{})
*p.Extra = &paramType{flag: flag}
return
}
(*p.Extra).(*paramType).flag = flag
}
// Alias reports whether p, which must be for an OTYPE, is a type alias.
func (p *Param) Alias() bool {
if p.Extra == nil {
return false
}
t, ok := (*p.Extra).(*paramType)
if !ok {
return false
}
return t.alias
}
// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
func (p *Param) SetAlias(alias bool) {
if p.Extra == nil {
if !alias {
return
}
p.Extra = new(interface{})
*p.Extra = &paramType{alias: alias}
return
}
(*p.Extra).(*paramType).alias = alias
}
// EmbedFiles returns the list of embedded files for p,
// which must be for an ONAME var.
func (p *Param) EmbedFiles() []string {
if p.Extra == nil {
return nil
}
return *(*p.Extra).(*embedFileList)
}
// SetEmbedFiles sets the list of embedded files for p,
// which must be for an ONAME var.
func (p *Param) SetEmbedFiles(list []string) {
if p.Extra == nil {
if len(list) == 0 {
return
}
f := embedFileList(list)
p.Extra = new(interface{})
*p.Extra = &f
return
}
*(*p.Extra).(*embedFileList) = list
}
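// A short sketch of how the shared Extra field is meant to be used
// (illustrative only; OTYPE and ONAME uses never overlap on one node):
//
//	var p Param
//	p.SetPragma(NotInHeap) // stores a *paramType in p.Extra
//	_ = p.Pragma()         // reads (*p.Extra).(*paramType).flag
//
// Calling SetEmbedFiles on the same Param would panic on the *embedFileList
// type assertion, which is acceptable because a node is only ever one kind.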
// Functions
@ -555,7 +631,7 @@ type Func struct {
Ntype *Node // signature
Top int // top context (ctxCallee, etc)
Closure *Node // OCLOSURE <-> ODCLFUNC
Nname *Node
Nname *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
lsym *obj.LSym
Inl *Inline
@ -697,7 +773,7 @@ const (
OCALLPART // Left.Right (method expression x.Method, not called)
OCAP // cap(Left)
OCLOSE // close(Left)
OCLOSURE // func Type { Body } (func literal)
OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
OMAPLIT // Type{List} (composite literal, Type is map)
OSTRUCTLIT // Type{List} (composite literal, Type is struct)
@ -789,7 +865,12 @@ const (
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Sym]
OCASE // case List: Nbody (List==nil means default)
// OCASE: case List: Nbody (List==nil means default)
// For OTYPESW, List is an OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
OCONTINUE // continue [Sym]
ODEFER // defer Left (Left must be call)
OEMPTY // no-op (empty statement)
@ -813,14 +894,18 @@ const (
ORETURN // return List
OSELECT // select { List } (List is list of OCASE)
OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
OTYPESW // Left = Right.(type) (appears as .Left of OSWITCH)
// OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
// Left is nil if there is no type-switch variable
OTYPESW
// types
OTCHAN // chan int
OTMAP // map[string]int
OTSTRUCT // struct{}
OTINTER // interface{}
OTFUNC // func()
// OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
// list of result fields.
OTFUNC
OTARRAY // []int, [8]int, [N]int or [...]int
// misc


@ -257,12 +257,12 @@ func typecheck(n *Node, top int) (res *Node) {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
if n1.Name != nil && !n1.Name.Param.Alias {
if n1.Name != nil && !n1.Name.Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
if n.Name != nil && n.Name.Param.Alias && n.Type == nil {
if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
lineno = n.Pos
Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
@ -2065,11 +2065,6 @@ func typecheck1(n *Node, top int) (res *Node) {
n.Type = nil
return n
case OCASE:
ok |= ctxStmt
typecheckslice(n.List.Slice(), ctxExpr)
typecheckslice(n.Nbody.Slice(), ctxStmt)
case ODCLFUNC:
ok |= ctxStmt
typecheckfunc(n)
@ -2516,7 +2511,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
} else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
n.Left = nod(ODEREF, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
@ -3504,7 +3499,7 @@ func setUnderlying(t, underlying *types.Type) {
}
// Propagate go:notinheap pragma from the Name to the Type.
if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
t.SetNotInHeap(true)
}
@ -3676,7 +3671,7 @@ func typecheckdef(n *Node) {
n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
case OTYPE:
if p := n.Name.Param; p.Alias {
if p := n.Name.Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.


@ -21,7 +21,7 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
func walk(fn *Node) {
Curfn = fn
if Debug['W'] != 0 {
if Debug.W != 0 {
s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
@ -63,14 +63,14 @@ func walk(fn *Node) {
return
}
walkstmtlist(Curfn.Nbody.Slice())
if Debug['W'] != 0 {
if Debug.W != 0 {
s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
zeroResults()
heapmoves()
if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Func.Enter)
}
@ -436,7 +436,7 @@ func walkexpr(n *Node, init *Nodes) *Node {
lno := setlineno(n)
if Debug['w'] > 1 {
if Debug.w > 1 {
Dump("before walk expr", n)
}
@ -474,7 +474,7 @@ opswitch:
ODEREF, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
case OEFACE, OAND, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@ -965,14 +965,6 @@ opswitch:
fn := basicnames[param] + "to" + basicnames[result]
n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
case OANDNOT:
n.Left = walkexpr(n.Left, init)
n.Op = OAND
n.SetImplicit(true) // for walkCheckPtrArithmetic
n.Right = nod(OBITNOT, n.Right, nil)
n.Right = typecheck(n.Right, ctxExpr)
n.Right = walkexpr(n.Right, init)
case ODIV, OMOD:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@ -997,7 +989,7 @@ opswitch:
// runtime calls late in SSA processing.
if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
if n.Right.Op == OLITERAL {
// Leave div/mod by constant powers of 2.
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
case TINT64:
@ -1010,6 +1002,9 @@ opswitch:
}
case TUINT64:
c := uint64(n.Right.Int64Val())
if c < 1<<16 {
break opswitch
}
if c != 0 && c&(c-1) == 0 {
break opswitch
}
@ -1049,7 +1044,7 @@ opswitch:
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
@ -1057,7 +1052,7 @@ opswitch:
}
} else if Isconst(n.Left, CTSTR) {
n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
@ -1599,7 +1594,7 @@ opswitch:
updateHasCall(n)
if Debug['w'] != 0 && n != nil {
if Debug.w != 0 && n != nil {
Dump("after walk expr", n)
}
@ -1965,7 +1960,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
case TPTR, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
case TPTR:
if n.Type.Elem().NotInHeap() {
on = syslook("printuintptr")
n = nod(OCONV, n, nil)
n.Type = types.Types[TUNSAFEPTR]
n = nod(OCONV, n, nil)
n.Type = types.Types[TUINTPTR]
break
}
fallthrough
case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
@ -2819,7 +2824,7 @@ func appendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
func isAppendOfMake(n *Node) bool {
if Debug['N'] != 0 || instrumenting {
if Debug.N != 0 || instrumenting {
return false
}
@ -3609,14 +3614,20 @@ func bounded(n *Node, max int64) bool {
}
switch n.Op {
case OAND:
case OAND, OANDNOT:
v := int64(-1)
if smallintconst(n.Left) {
switch {
case smallintconst(n.Left):
v = n.Left.Int64Val()
} else if smallintconst(n.Right) {
case smallintconst(n.Right):
v = n.Right.Int64Val()
if n.Op == OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
}
}
}
if 0 <= v && v < max {
return true
}
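// A small worked example of the OANDNOT masking above, assuming an unsigned
// 8-bit operand (bits == 8, sign == false) and the constant on the right:
//
//	x &^ 0xF0  =>  v = ^0xF0 & 0xFF = 0x0F
//
// so the result is at most 0x0F and bounded reports true for any max > 0x0F.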
@ -3976,7 +3987,7 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n *Node) bool {
return Debug['N'] == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
}
func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
@ -4045,12 +4056,8 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
case OADD:
walk(n.Left)
walk(n.Right)
case OSUB:
case OSUB, OANDNOT:
walk(n.Left)
case OAND:
if n.Implicit() { // was OANDNOT
walk(n.Left)
}
case OCONVNOP:
if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init)


@ -213,15 +213,15 @@ func s15a8(x *[15]int64) [15]int64 {
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r1 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~R0 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~r1 = \u003cN\u003e (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~r1:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~r1) (return)"}]}`)
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u003cN\u003e (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
})
}


@ -166,34 +166,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.To.Reg = v.Reg1()
case ssa.OpPPC64LoweredAtomicAnd8,
ssa.OpPPC64LoweredAtomicOr8:
ssa.OpPPC64LoweredAtomicAnd32,
ssa.OpPPC64LoweredAtomicOr8,
ssa.OpPPC64LoweredAtomicOr32:
// LWSYNC
// LBAR (Rarg0), Rtmp
// LBAR/LWAR (Rarg0), Rtmp
// AND/OR Rarg1, Rtmp
// STBCCC Rtmp, (Rarg0)
// STBCCC/STWCCC Rtmp, (Rarg0)
// BNE -3(PC)
ld := ppc64.ALBAR
st := ppc64.ASTBCCC
if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
ld = ppc64.ALWAR
st = ppc64.ASTWCCC
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
plwsync := s.Prog(ppc64.ALWSYNC)
plwsync.To.Type = obj.TYPE_NONE
p := s.Prog(ppc64.ALBAR)
// LBAR or LWAR
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
// AND/OR reg1,out
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP
p2 := s.Prog(ppc64.ASTBCCC)
// STBCCC or STWCCC
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP
// BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
@ -637,6 +649,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Auxint holds encoded rotate + mask
case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
rot, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
// Auxint holds mask
case ssa.OpPPC64RLWNM:
_, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
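// DecodePPC64RotateMask is assumed here to unpack the AuxInt into
// (rotate, mask-begin, mask-end, mask). RLWINM/RLWMI take the rotate amount
// as an immediate, while RLWNM reads it from the second register argument,
// which is why only the mask portion is materialized in that case.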
case ssa.OpPPC64MADDLD:
r := v.Reg()
r1 := v.Args[0].Reg()


@ -25,7 +25,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
return p
}
// TODO(jsing): Add a duff zero implementation for medium sized ranges.
if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
p.Reg = riscv.REG_SP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
return p
}
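// Rough check of the offset arithmetic above, assuming each runtime.duffzero
// element on riscv64 is 8 bytes of instructions and clears one pointer-width
// word: for cnt == 16*Widthptr the entry point is 8*(128-16) = 896 bytes into
// the function, leaving exactly the final 16 elements to run.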
// Loop, zeroing pointer width bytes at a time.
// ADD $(off), SP, T0


@ -190,7 +190,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// input args need no code
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
case ssa.OpCopy, ssa.OpRISCV64MOVconvert:
case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
}
@ -208,6 +208,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = rs
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
case ssa.OpRISCV64MOVDnop:
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
@ -228,6 +233,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
a := v.Args[0]
for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
a = a.Args[0]
}
as := v.Op.Asm()
rs := v.Args[0].Reg()
rd := v.Reg()
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
// arg is a proper-typed load and already sign/zero-extended
if rs == rd {
return
}
as = riscv.AMOV
default:
}
}
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = rs
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
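// The special case above drops a sign/zero-extension whose source is a load
// of exactly the same width and signedness: it is elided entirely when the
// source and destination registers match, and otherwise degrades to a plain
// MOV.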
case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
@ -572,6 +608,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64DUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpRISCV64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
default:
v.Fatalf("Unhandled op %v", v.Op)
}


@ -182,11 +182,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
i := v.Aux.(s390x.RotateParams)
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
p.RestArgs = []obj.Addr{
p.SetRestArgs([]obj.Addr{
{Type: obj.TYPE_CONST, Offset: int64(i.End)},
{Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
{Type: obj.TYPE_REG, Reg: r2},
}
})
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
case ssa.OpS390XRISBGZ:
r1 := v.Reg()
r2 := v.Args[0].Reg()
i := v.Aux.(s390x.RotateParams)
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
p.SetRestArgs([]obj.Addr{
{Type: obj.TYPE_CONST, Offset: int64(i.End)},
{Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
{Type: obj.TYPE_REG, Reg: r2},
})
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
case ssa.OpS390XADD, ssa.OpS390XADDW,
ssa.OpS390XSUB, ssa.OpS390XSUBW,
@ -360,7 +372,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
ssa.OpS390XRLLconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
@ -761,6 +773,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpS390XLAN, ssa.OpS390XLAO:
// LA(N|O) Ry, TMP, 0(Rx)
op := s.Prog(v.Op.Asm())
op.From.Type = obj.TYPE_REG
op.From.Reg = v.Args[1].Reg()
op.Reg = s390x.REGTMP
op.To.Type = obj.TYPE_MEM
op.To.Reg = v.Args[0].Reg()
case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
r := v.Args[0].Reg() // clobbered, assumed R1 in comments
@ -905,7 +925,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
p.Reg = s390x.REG_R3
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: 0}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
if b.Succs[0].Block() != next {
s.Br(s390x.ABR, b.Succs[0].Block())
}
@ -948,17 +968,17 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()})
case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))})
case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))})
default:
b.Fatalf("branch not implemented: %s", b.LongString())
}


@ -59,22 +59,22 @@ func addressingModes(f *Func) {
v.AuxInt += p.AuxInt
case [2]auxType{auxSymValAndOff, auxInt32}:
vo := ValAndOff(v.AuxInt)
if !vo.canAdd(p.AuxInt) {
if !vo.canAdd64(p.AuxInt) {
continue
}
v.AuxInt = vo.add(p.AuxInt)
v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymValAndOff, auxSymOff}:
vo := ValAndOff(v.AuxInt)
if v.Aux != nil && p.Aux != nil {
continue
}
if !vo.canAdd(p.AuxInt) {
if !vo.canAdd64(p.AuxInt) {
continue
}
if p.Aux != nil {
v.Aux = p.Aux
}
v.AuxInt = vo.add(p.AuxInt)
v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymOff, auxNone}:
// nothing to do
case [2]auxType{auxSymValAndOff, auxNone}:


@ -35,7 +35,7 @@ func branchelim(f *Func) {
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32:
case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
loadAddr.add(v.Args[0].ID)
case OpMove:
loadAddr.add(v.Args[1].ID)


@ -304,37 +304,39 @@ commas. For example:
`
}
if phase == "check" && flag == "on" {
if phase == "check" {
switch flag {
case "on":
checkEnabled = val != 0
debugPoset = checkEnabled // also turn on advanced self-checking in prove's datastructure
return ""
}
if phase == "check" && flag == "off" {
case "off":
checkEnabled = val == 0
debugPoset = checkEnabled
return ""
}
if phase == "check" && flag == "seed" {
case "seed":
checkEnabled = true
checkRandSeed = val
debugPoset = checkEnabled
return ""
}
}
alltime := false
allmem := false
alldump := false
if phase == "all" {
if flag == "time" {
switch flag {
case "time":
alltime = val != 0
} else if flag == "mem" {
case "mem":
allmem = val != 0
} else if flag == "dump" {
case "dump":
alldump = val != 0
if alldump {
BuildDump = valString
}
} else {
default:
return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
}
}
@ -429,7 +431,7 @@ var passes = [...]pass{
{name: "early copyelim", fn: copyelim},
{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
{name: "short circuit", fn: shortcircuit},
{name: "decompose args", fn: decomposeArgs, required: true},
{name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering
{name: "decompose user", fn: decomposeUser, required: true},
{name: "pre-opt deadcode", fn: deadcode},
{name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
@ -441,8 +443,8 @@ var passes = [...]pass{
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
{name: "early fuse", fn: fuseEarly},
{name: "expand calls", fn: expandCalls, required: true},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "expand calls", fn: expandCalls, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},


@ -149,6 +149,7 @@ type Frontend interface {
SplitStruct(LocalSlot, int) LocalSlot
SplitArray(LocalSlot) LocalSlot // array must be length 1
SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
@ -198,9 +199,9 @@ const (
const go116lateCallExpansion = true
// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
// within compilation of a function/method triggered by GOSSAHASH (defaults to "yes").
// within compilation of a function/method.
func LateCallExpansionEnabledWithin(f *Func) bool {
return go116lateCallExpansion && f.DebugTest // Currently set up for GOSSAHASH bug searches
return go116lateCallExpansion
}
// NewConfig returns a new configuration object for the given architecture.


@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
"sort"
)
// decompose converts phi ops on compound builtin types into phi
@ -31,77 +32,79 @@ func decomposeBuiltIn(f *Func) {
}
// Split up named values into their components.
// accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion,
// accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
// builtin types with leaf components, and thus there is no need to reprocess the newly created LocalSlots.
var toDelete []namedVal
var newNames []LocalSlot
for _, name := range f.Names {
for i, name := range f.Names {
t := name.Type
switch {
case t.IsInteger() && t.Size() > f.Config.RegSize:
hiName, loName := f.fe.SplitInt64(name)
newNames = append(newNames, hiName, loName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpInt64Make {
continue
}
f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0])
f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsComplex():
rName, iName := f.fe.SplitComplex(name)
newNames = append(newNames, rName, iName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpComplexMake {
continue
}
f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0])
f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsString():
ptrName, lenName := f.fe.SplitString(name)
newNames = append(newNames, ptrName, lenName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpStringMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsSlice():
ptrName, lenName, capName := f.fe.SplitSlice(name)
newNames = append(newNames, ptrName, lenName, capName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpSliceMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsInterface():
typeName, dataName := f.fe.SplitInterface(name)
newNames = append(newNames, typeName, dataName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpIMake {
continue
}
f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0])
f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsFloat():
// floats are never decomposed, even ones bigger than RegSize
newNames = append(newNames, name)
case t.Size() > f.Config.RegSize:
f.Fatalf("undecomposed named type %s %v", name, t)
default:
newNames = append(newNames, name)
}
}
f.Names = newNames
deleteNamedVals(f, toDelete)
f.Names = append(f.Names, newNames...)
}
func decomposeBuiltInPhi(v *Value) {
@ -263,14 +266,20 @@ func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalS
f.Fatalf("array not of size 1")
}
elemName := f.fe.SplitArray(name)
var keep []*Value
for _, v := range f.NamedValues[name] {
if v.Op != OpArrayMake1 {
keep = append(keep, v)
continue
}
f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0])
}
if len(keep) == 0 {
// delete the name for the array as a whole
delete(f.NamedValues, name)
} else {
f.NamedValues[name] = keep
}
if t.Elem().IsArray() {
return decomposeUserArrayInto(f, elemName, slots)
@ -300,17 +309,23 @@ func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []Local
}
makeOp := StructMakeOp(n)
var keep []*Value
// create named values for each struct field
for _, v := range f.NamedValues[name] {
if v.Op != makeOp {
keep = append(keep, v)
continue
}
for i := 0; i < len(fnames); i++ {
f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i])
}
}
// remove the name of the struct as a whole
if len(keep) == 0 {
// delete the name for the struct as a whole
delete(f.NamedValues, name)
} else {
f.NamedValues[name] = keep
}
// now that this f.NamedValues contains values for the struct
// fields, recurse into nested structs
@ -400,3 +415,35 @@ func StructMakeOp(nf int) Op {
}
panic("too many fields in an SSAable struct")
}
type namedVal struct {
locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
}
// deleteNamedVals removes particular values with debugger names from f's naming data structures
func deleteNamedVals(f *Func, toDelete []namedVal) {
// Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
sort.Slice(toDelete, func(i, j int) bool {
if toDelete[i].locIndex != toDelete[j].locIndex {
return toDelete[i].locIndex > toDelete[j].locIndex
}
return toDelete[i].valIndex > toDelete[j].valIndex
})
// Get rid of obsolete names
for _, d := range toDelete {
loc := f.Names[d.locIndex]
vals := f.NamedValues[loc]
l := len(vals) - 1
if l > 0 {
vals[d.valIndex] = vals[l]
f.NamedValues[loc] = vals[:l]
} else {
delete(f.NamedValues, loc)
l = len(f.Names) - 1
f.Names[d.locIndex] = f.Names[l]
f.Names = f.Names[:l]
}
}
}
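// A tiny example of why the descending order matters: with pending deletions
// at valIndex 1 and valIndex 4 in a five-element list, deleting 4 first keeps
// index 1 valid; deleting 1 first would swap element 4 down into slot 1, so
// the later deletion at index 4 would point past the shortened slice.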

File diff suppressed because it is too large


@ -125,6 +125,10 @@ func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
func (DummyFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}


@ -191,11 +191,6 @@ func flagalloc(f *Func) {
b.FlagsLiveAtEnd = end[b.ID] != nil
}
const go115flagallocdeadcode = true
if !go115flagallocdeadcode {
return
}
// Remove any now-dead values.
// The number of values to remove is likely small,
// and removing them requires processing all values in a block,

View file

@@ -310,7 +310,7 @@
(Const32 ...) => (MOVLconst ...)
(Const(32|64)F ...) => (MOVS(S|D)const ...)
(ConstNil) => (MOVLconst [0])
(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
(ConstBool [c]) => (MOVLconst [b2i32(c)])
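
b2i32 here is one of the small typed-aux helpers the rewrite rules lean on; assuming it simply maps a bool to 0 or 1 as an int32 (the authoritative definition lives in cmd/compile/internal/ssa/rewrite.go), its behavior is roughly:

// sketch of the assumed helper: true => 1, false => 0, typed int32.
func b2i32(b bool) int32 {
	if b {
		return 1
	}
	return 0
}
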
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@@ -640,31 +640,31 @@
// it compiles to a thunk call).
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B|SS|SD)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
&& valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
// Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
@@ -679,37 +679,37 @@
// fold LEALs together
(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL [off1+off2] {mergeSymTyped(sym1,sym2)} x)
(LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAL into LEAL1
(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL1 into LEAL
(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL into LEAL[248]
(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL[248] into LEAL
(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
(LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
(LEAL4 [off1+2*off2] {sym} x y)
(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
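
The "only some such merges are possible" remark above is plain address arithmetic: LEAL1 computes x+y+off while LEAL2/4/8 compute x+2y/4y/8y+off, so an inner LEAL1 with both operands equal just doubles its argument, and an outer LEAL2 scales the inner offset by 2. A self-contained check of that arithmetic (illustrative only, not compiler code):

package main

import "fmt"

// leal1 and leal2 model the addresses the instructions compute.
func leal1(off, x, y int32) int32 { return x + y + off }   // LEAL1 [off] x y
func leal2(off, x, y int32) int32 { return x + 2*y + off } // LEAL2 [off] x y

func main() {
	off1, off2, x, y := int32(8), int32(4), int32(100), int32(7)
	// (LEAL1 [off1] x (LEAL1 [off2] y y)) folds to (LEAL2 [off1+off2] x y).
	fmt.Println(leal1(off1, x, leal1(off2, y, y)) == leal2(off1+off2, x, y)) // true
}
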
@@ -993,49 +993,49 @@
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
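
The constant-store combines above depend on little-endian layout: a byte constant a stored at off together with a byte constant c stored at off+1 leaves the same memory image as one 16-bit store of a | c<<8 at off, and two adjacent 16-bit constant stores likewise become one 32-bit store of a | c<<16, which is what the makeValAndOff32 expressions build. A self-contained illustration (plain Go, not compiler code):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two adjacent byte stores ...
	var p [2]byte
	p[0] = 0x34 // constant stored at off
	p[1] = 0x12 // constant stored at off+1
	// ... match a single little-endian 16-bit store of lo | hi<<8 at off.
	var q [2]byte
	binary.LittleEndian.PutUint16(q[:], 0x34|0x12<<8)
	fmt.Println(p == q) // true
}
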
// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))

View file

@@ -401,7 +401,7 @@
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil ) => (MOVQconst [0])
(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
(ConstBool [c]) => (MOVLconst [b2i32(c)])
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@@ -531,7 +531,9 @@
// Atomic memory updates.
(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
// Write barrier.
(WB ...) => (LoweredWB ...)
@@ -581,7 +583,7 @@
((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
=> ((ULT|UGE) (BTQconst [int8(log32(c))] x))
((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
=> ((ULT|UGE) (BTQconst [int8(log2(c))] x))
=> ((ULT|UGE) (BTQconst [int8(log64(c))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
@@ -589,7 +591,7 @@
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
=> (SET(B|AE) (BTQconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
=> (SET(B|AE) (BTQconst [int8(log2(c))] x))
=> (SET(B|AE) (BTQconst [int8(log64(c))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
@@ -600,7 +602,7 @@
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log2(c))] x) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
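
The bit-test rewrites come down to one identity: when c is a power of two, c = 1<<k, testing x&c against zero is the same as asking whether bit k of x is set, i.e. (x>>k)&1 != 0, which the BTL/BTQ lowerings compute directly. A minimal illustration (not compiler code):

package bittest

// With c = 1<<k, x&c != 0 and (x>>k)&1 != 0 are the same predicate; the
// BT-based rules replace the mask test with the explicit bit test.
func maskSet(x, c uint64) bool { return x&c != 0 }

func bitSet(x uint64, k uint) bool { return (x>>k)&1 != 0 }
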
@@ -629,7 +631,7 @@
((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
=> (BT(S|C)Lconst [int8(log32(c))] x)
((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
=> (BT(S|C)Qconst [int8(log2(c))] x)
=> (BT(S|C)Qconst [int8(log64(c))] x)
((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
=> (BT(S|C)Lconst [int8(log32(c))] x)
@@ -640,7 +642,7 @@
(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRLconst [int8(log32(^c))] x)
(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
=> (BTRQconst [int8(log2(^c))] x)
=> (BTRQconst [int8(log64(^c))] x)
(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRLconst [int8(log32(^c))] x)
@@ -957,7 +959,7 @@
(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [c] x) && isPowerOfTwo(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log2(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
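
These multiply-by-constant rules are standard strength reduction: a power of two becomes a shift, c±1 a power of two becomes a shift plus an add or sub, and small 3/5/9-style factors become LEA forms (73 = 8*9+1 is the nested LEA8 case above). The isPowerOfTwo64/log64 helpers introduced here, alongside the existing 32-bit variants, are assumed to be the usual "positive power of two" and "bit position" utilities; a hedged sketch of both, plus a check of the arithmetic:

package main

import (
	"fmt"
	"math/bits"
)

// Assumed behavior of the helpers named in the rules; the real definitions
// live in cmd/compile/internal/ssa/rewrite.go.
func isPowerOfTwo64(n int64) bool { return n > 0 && n&(n-1) == 0 }
func log64(n int64) int64         { return int64(bits.Len64(uint64(n))) - 1 }

func main() {
	x := int64(11)
	// 73 = 8*9 + 1, so x*73 == x + 8*(x + 8*x): the LEA(Q|L)8 x (LEA(Q|L)8 x x) form.
	fmt.Println(x*73 == x+8*(x+8*x)) // true
	// 17-1 is a power of two, so x*17 == (x << log64(16)) + x.
	c := int64(17)
	fmt.Println(isPowerOfTwo64(c-1) && x*c == x<<log64(c-1)+x) // true
}
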
@@ -1135,80 +1137,80 @@
// what variables are being read/written by the ops.
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
(MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(CMP(Q|L|W|B)load [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ [off1+off2] {mergeSymTyped(sym1,sym2)} x)
(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
(LEAQ4 [off1+2*off2] {sym1} x y)
(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
@@ -1998,31 +2000,31 @@
=> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLload [off1+off2] {sym} ptr mem)
@@ -2058,17 +2060,17 @@
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|B)atomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
// Merge ADDQconst and LEAQ into atomic stores.
(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGQ [off1+off2] {sym} val ptr mem)
(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
(XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGL [off1+off2] {sym} val ptr mem)
(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
(XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
// Merge ADDQconst into atomic adds.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.

View file

@@ -902,7 +902,9 @@ func init() {
// Atomic memory updates.
{name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
{name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
{name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
{name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
}
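
The ORBlock/ORLlock entries added here mirror the existing locked AND ops: each is an atomic read-modify-write of a byte or 32-bit word, as the "*(arg0+auxint+aux) |= arg1" comments state. A sketch of that intended semantics only, using a CAS loop for illustration (the real op emits a single LOCK-prefixed ORL, not a loop):

package atomicsketch

import "sync/atomic"

// orUint32 performs "*addr |= v" atomically, illustrating the effect of
// ORLlock; the hardware instruction achieves the same result directly.
func orUint32(addr *uint32, v uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, old, old|v) {
			return
		}
	}
}
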
var AMD64blocks = []blockData{

View file

@@ -169,10 +169,10 @@
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
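
The Rsh8x64 rule above handles shift counts of 8 or more: for a signed 8-bit value such a shift leaves nothing but copies of the sign bit, and that can be produced by moving the value to the top of a 32-bit register and arithmetic-shifting down by 31. A small illustration (plain Go, not compiler code):

package main

import "fmt"

// For an int8 x, shifting right by 8 or more yields -1 when x < 0 and 0
// otherwise, which equals (int32(x) << 24) >> 31, the SLLconst[24] then
// SRAconst[31] sequence in the rule.
func rsh8ByBig(x int8) int32 { return (int32(x) << 24) >> 31 }

func main() {
	fmt.Println(rsh8ByBig(-5), rsh8ByBig(5)) // -1 0
}
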
// constants
(Const(8|16|32) ...) -> (MOVWconst ...)
(Const(32F|64F) ...) -> (MOV(F|D)const ...)
(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVWconst [0])
(ConstBool ...) -> (MOVWconst ...)
(ConstBool [b]) => (MOVWconst [b2i32(b)])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
@@ -243,10 +243,10 @@
(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMP x y))
(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)
(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
(Addr ...) -> (MOVWaddr ...)
(Addr {sym} base) => (MOVWaddr {sym} base)
(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
// loads
@@ -433,30 +433,30 @@
(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem)
(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
@@ -1052,8 +1052,8 @@
(BICshiftRL x (MOVWconst [c]) [d]) => (BICconst x [int32(uint32(c)>>uint64(d))])
(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)])
(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c<<uint64(d))])
(MVNshiftRL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)>>uint64(d))])
(MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))])
(MVNshiftRL (MOVWconst [c]) [d]) => (MOVWconst [^int32(uint32(c)>>uint64(d))])
(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst [int32(c)>>uint64(d)])
(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c<<uint64(d)])
(CMPshiftRL x (MOVWconst [c]) [d]) => (CMPconst x [int32(uint32(c)>>uint64(d))])
(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
@@ -1190,12 +1190,12 @@
(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem)
(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem)
// generic simplifications
@@ -1470,6 +1470,6 @@
(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRAreg x y z) yes no)
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read8(sym, off))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])

View file

@@ -547,11 +547,20 @@
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
// Currently the updated value is not used, but we need a register to temporarily hold it.
(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
(AtomicAnd8Variant ptr val mem) => (Select1 (LoweredAtomicAnd8Variant ptr val mem))
(AtomicAnd32Variant ptr val mem) => (Select1 (LoweredAtomicAnd32Variant ptr val mem))
(AtomicOr8Variant ptr val mem) => (Select1 (LoweredAtomicOr8Variant ptr val mem))
(AtomicOr32Variant ptr val mem) => (Select1 (LoweredAtomicOr32Variant ptr val mem))
// Write barrier.
(WB ...) => (LoweredWB ...)
@@ -859,88 +868,88 @@
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(STP [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val1 val2 mem)
(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVQstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// store zero
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
@@ -1171,145 +1180,147 @@
(MUL x (MOVDconst [-1])) => (NEG x)
(MUL _ (MOVDconst [0])) => (MOVDconst [0])
(MUL x (MOVDconst [1])) => x
(MUL x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (ADDshiftLL x x [log2(c-1)])
(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 => x
(MULW x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log2(c-1)])
(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
// mneg by constant
(MNEG x (MOVDconst [-1])) => x
(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
(MNEG x (MOVDconst [1])) => (NEG x)
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MADD a x (MOVDconst [-1])) => (SUB a x)
(MADD a _ (MOVDconst [0])) => a
(MADD a x (MOVDconst [1])) => (ADD a x)
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADD a (MOVDconst [-1]) x) => (SUB a x)
(MADD a (MOVDconst [0]) _) => a
(MADD a (MOVDconst [1]) x) => (ADD a x)
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
(MSUB a x (MOVDconst [1])) => (SUB a x)
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUB a (MOVDconst [-1]) x) => (ADD a x)
(MSUB a (MOVDconst [0]) _) => a
(MSUB a (MOVDconst [1]) x) => (SUB a x)
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
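// The MSUBW rules are the 32-bit counterparts: only the low 32 bits of the product
// matter, which is why several guards compare int32(c) and the factored forms also
// require is32Bit(c). For instance, assuming c == 0xFFFFFFFF (so int32(c) == -1),
// a - x*c equals a + x in 32-bit arithmetic, matching the first MSUBW rule; assuming
// c == 12 (c%3 == 0, c/3 == 4, log64(c/3) == 2), a - x*12 becomes a + ((x - x<<2) << 2).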
// div by constant
(UDIV x (MOVDconst [1])) => x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log2(c)] x)
(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log2(c)] x)
(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
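// Brief note on the unsigned div/mod rules above: for a power-of-two divisor,
// unsigned division is a logical right shift and the remainder is a mask.
// Assuming c == 8 (log64(c) == 3), (UDIV x (MOVDconst [8])) becomes (SRLconst [3] x)
// and (UMOD x (MOVDconst [8])) becomes (ANDconst [7] x); the W-suffixed forms do the
// same for 32-bit operands once is32Bit(c) holds.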
// generic simplifications
(ADD x (NEG y)) => (SUB x y)
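// For example, x + (-y) == x - y, so (ADD x (NEG y)) can be emitted as a single
// (SUB x y) instead of a negate followed by an add.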
